repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
BCLibraries/bc-lib-search | 14,396,730,406,512 | e6180d8ca61afa94c9dfc0564ea75dbfd680d6dc | 2fe74d7252afb91bdb1fc10649fde3e8abe1a7a7 | /indexer/index_record.py | 030678eb6430be93198166ed28c81ce05c956fbc | [
"MIT"
]
| permissive | https://github.com/BCLibraries/bc-lib-search | 0bbf931d9a303518a172ad85892ffd2f0a07678d | 68e8404d9529d20e6936a95c86323abd92401ce3 | refs/heads/master | 2020-05-27T14:43:45.266697 | 2018-12-03T21:50:14 | 2018-12-03T21:50:14 | 22,269,009 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class IndexRecord(object):
def __init__(self):
self.title = None
self.author = None
self.subjects = None
self.location = None
self.issn = None
self.isbn = None
self.collections = None
self.series = None
self.callnum = None
self.notes = None
self.toc = None
self.type = None
self.tax1 = []
self.tax2 = []
self.tax3 = []
self.id = None
self.language = None
self.alttitles = None
self.restricted = False
self.oai_string = None | UTF-8 | Python | false | false | 587 | py | 40 | index_record.py | 21 | 0.516184 | 0.511073 | 0 | 22 | 25.727273 | 31 |
decko/sistema-nacional-cultura | 3,848,290,732,734 | 057c7ef133838df983f40ad76594301fefa3faf9 | 9833046fce96cbaa05682b3e244273a4a773fbf4 | /adesao/views.py | 45182b041652b05ce4d2de7eb64a7c38ce2b1bb9 | []
| no_license | https://github.com/decko/sistema-nacional-cultura | aaca65dd14567e1a2e9397087ad67a92a5b2ee91 | f727bf428fcdc9c16776953a0a0a4a8c8bd5897f | refs/heads/master | 2021-01-18T01:51:55.848700 | 2015-05-23T05:29:32 | 2015-05-23T05:29:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from adesao.models import Municipio, Responsavel, Secretario
from adesao.forms import CadastrarUsuarioForm, CadastrarMunicipioForm
from adesao.forms import CadastrarResponsavelForm, CadastrarSecretarioForm
from wkhtmltopdf.views import PDFTemplateView
# Create your views here.
def index(request):
    """Render the public landing page."""
    template = 'index.html'
    return render(request, template)
@login_required
def home(request):
    """Render the home page for an authenticated user (login required)."""
    template = 'home.html'
    return render(request, template)
class CadastrarUsuario(CreateView):
    """Register a new user account, then return to the landing page."""

    template_name = 'usuario/cadastrar_usuario.html'
    form_class = CadastrarUsuarioForm
    success_url = reverse_lazy('adesao:index')
class CadastrarMunicipio(FormView):
    """Create the Municipio attached to the logged-in user's profile.

    If the profile already owns a Municipio, the request is redirected to
    the corresponding edit view instead of showing the creation form.
    """

    template_name = 'prefeitura/cadastrar_prefeitura.html'
    form_class = CadastrarMunicipioForm
    success_url = reverse_lazy('adesao:responsavel')

    def dispatch(self, *args, **kwargs):
        # A profile may own at most one Municipio: send existing ones to
        # the update view.
        usuario = self.request.user.usuario
        if usuario.municipio:
            return redirect('alterar_municipio',
                            municipio_id=usuario.municipio.id)
        return super(CadastrarMunicipio, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # Persist the Municipio and attach it to the user's profile.
        usuario = self.request.user.usuario
        usuario.municipio = form.save(commit=True)
        usuario.save()
        return super(CadastrarMunicipio, self).form_valid(form)
class AlterarMunicipio(UpdateView):
    """Edit an existing Municipio record."""

    model = Municipio
    template_name = 'prefeitura/cadastrar_prefeitura.html'
    form_class = CadastrarMunicipioForm
    success_url = reverse_lazy('adesao:responsavel')
class CadastrarResponsavel(CreateView):
    """Create the Responsavel attached to the logged-in user's profile.

    If the profile already owns a Responsavel, the request is redirected
    to the corresponding edit view instead of showing the creation form.
    """

    template_name = 'responsavel/cadastrar_responsavel.html'
    form_class = CadastrarResponsavelForm
    success_url = reverse_lazy('adesao:index')

    def dispatch(self, *args, **kwargs):
        # A profile may own at most one Responsavel: send existing ones to
        # the update view.
        usuario = self.request.user.usuario
        if usuario.responsavel:
            return redirect(
                'alterar_responsavel',
                responsavel_id=usuario.responsavel.id)
        return super(CadastrarResponsavel, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # Persist the Responsavel and attach it to the user's profile.
        usuario = self.request.user.usuario
        usuario.responsavel = form.save(commit=True)
        usuario.save()
        return super(CadastrarResponsavel, self).form_valid(form)
class AlterarResponsavel(UpdateView):
    """Edit an existing Responsavel record."""

    model = Responsavel
    template_name = 'responsavel/cadastrar_responsavel.html'
    form_class = CadastrarResponsavelForm
    success_url = reverse_lazy('adesao:index')
class CadastrarSecretario(CreateView):
    """Create the Secretario attached to the logged-in user's profile.

    If the profile already owns a Secretario, the request is redirected
    to the corresponding edit view instead of showing the creation form.
    """

    template_name = 'secretario/cadastrar_secretario.html'
    form_class = CadastrarSecretarioForm
    success_url = reverse_lazy('adesao:responsavel')

    def dispatch(self, *args, **kwargs):
        # A profile may own at most one Secretario: send existing ones to
        # the update view.
        usuario = self.request.user.usuario
        if usuario.secretario:
            return redirect('alterar_secretario',
                            secretario_id=usuario.secretario.id)
        return super(CadastrarSecretario, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # Persist the Secretario and attach it to the user's profile.
        usuario = self.request.user.usuario
        usuario.secretario = form.save(commit=True)
        usuario.save()
        return super(CadastrarSecretario, self).form_valid(form)
class AlterarSecretario(UpdateView):
    """Edit an existing Secretario record."""

    model = Secretario
    template_name = 'secretario/cadastrar_secretario.html'
    form_class = CadastrarSecretarioForm
    success_url = reverse_lazy('adesao:responsavel')
class MinutaAcordo(PDFTemplateView):
    """Serve the adhesion agreement draft ("minuta de acordo") as a PDF."""

    filename = 'minuta_acordo.pdf'
    template_name = 'termos/minuta_acordo.html'
    header_template = 'termos/minuta_header.html'
    show_content_in_browser = True
    # wkhtmltopdf flags: reserve room at the top of every page for the
    # repeated header template.
    cmd_options = {
        'margin-top': 60,
        'header-spacing': 5,
    }

    def get_context_data(self, **kwargs):
        # Expose the current request to the PDF templates.
        ctx = super(MinutaAcordo, self).get_context_data(**kwargs)
        ctx['request'] = self.request
        return ctx
class TermoSolicitacao(PDFTemplateView):
    """Serve the membership request term ("solicitacao") as a PDF."""

    filename = 'solicitacao.pdf'
    template_name = 'termos/solicitacao.html'
    header_template = 'termos/solicitacao_header.html'
    show_content_in_browser = True
    # wkhtmltopdf flags: reserve room at the top of every page for the
    # repeated header template.
    cmd_options = {
        'margin-top': 40,
        'header-spacing': 5,
    }

    def get_context_data(self, **kwargs):
        # Expose the current request to the PDF templates.
        ctx = super(TermoSolicitacao, self).get_context_data(**kwargs)
        ctx['request'] = self.request
        return ctx
| UTF-8 | Python | false | false | 4,488 | py | 19 | views.py | 5 | 0.708111 | 0.706774 | 0 | 135 | 32.244444 | 78 |
pulumi/pulumi-azure | 16,604,343,609,612 | 64c13c14245b8fa092b1228c8237416bf3e47440 | f487532281c1c6a36a5c62a29744d8323584891b | /sdk/python/pulumi_azure/paloalto/local_rulestack_rule.py | 30a59eb1a3f219538e365700122843e88feb5281 | [
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | https://github.com/pulumi/pulumi-azure | a8f8f21c46c802aecf1397c737662ddcc438a2db | c16962e5c4f5810efec2806b8bb49d0da960d1ea | refs/heads/master | 2023-08-25T00:17:05.290397 | 2023-08-24T06:11:55 | 2023-08-24T06:11:55 | 103,183,737 | 129 | 57 | Apache-2.0 | false | 2023-09-13T05:44:10 | 2017-09-11T20:19:15 | 2023-08-09T05:39:20 | 2023-09-13T05:44:08 | 186,735 | 125 | 46 | 62 | Java | false | false | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public names re-exported by `from ... import *` on this generated module.
__all__ = ['LocalRulestackRuleArgs', 'LocalRulestackRule']
@pulumi.input_type
class LocalRulestackRuleArgs:
    # tfgen-generated input type: holds the user-supplied arguments for a
    # LocalRulestackRule resource before they are handed to the Pulumi engine.
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 applications: pulumi.Input[Sequence[pulumi.Input[str]]],
                 destination: pulumi.Input['LocalRulestackRuleDestinationArgs'],
                 priority: pulumi.Input[int],
                 rulestack_id: pulumi.Input[str],
                 source: pulumi.Input['LocalRulestackRuleSourceArgs'],
                 audit_comment: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']] = None,
                 decryption_rule_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 inspection_certificate_id: Optional[pulumi.Input[str]] = None,
                 logging_enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 negate_destination: Optional[pulumi.Input[bool]] = None,
                 negate_source: Optional[pulumi.Input[bool]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 protocol_ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a LocalRulestackRule resource.
        :param pulumi.Input[str] action: The action to take on the rule being triggered.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] applications: Specifies a list of Applications.
        :param pulumi.Input['LocalRulestackRuleDestinationArgs'] destination: One or more `destination` blocks as defined below.
        :param pulumi.Input[int] priority: The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
               > **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
        :param pulumi.Input[str] rulestack_id: The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        :param pulumi.Input['LocalRulestackRuleSourceArgs'] source: One or more `source` blocks as defined below.
        :param pulumi.Input[str] audit_comment: The comment for Audit purposes.
        :param pulumi.Input['LocalRulestackRuleCategoryArgs'] category: A `category` block as defined below.
        :param pulumi.Input[str] decryption_rule_type: The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
        :param pulumi.Input[str] description: The description for the rule.
        :param pulumi.Input[bool] enabled: Should this Rule be enabled? Defaults to `true`.
        :param pulumi.Input[str] inspection_certificate_id: The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
        :param pulumi.Input[bool] logging_enabled: Should Logging be enabled? Defaults to `false`.
        :param pulumi.Input[str] name: The name which should be used for this Palo Alto Local Rulestack Rule.
        :param pulumi.Input[bool] negate_destination: Should the inverse of the Destination configuration be used. Defaults to `false`.
        :param pulumi.Input[bool] negate_source: Should the inverse of the Source configuration be used. Defaults to `false`.
        :param pulumi.Input[str] protocol: The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] protocol_ports: Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
        """
        # Required inputs are always recorded in the input bag.
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "applications", applications)
        pulumi.set(__self__, "destination", destination)
        pulumi.set(__self__, "priority", priority)
        pulumi.set(__self__, "rulestack_id", rulestack_id)
        pulumi.set(__self__, "source", source)
        # Optional inputs are recorded only when the caller supplied them, so
        # unset values stay absent rather than being stored as None.
        if audit_comment is not None:
            pulumi.set(__self__, "audit_comment", audit_comment)
        if category is not None:
            pulumi.set(__self__, "category", category)
        if decryption_rule_type is not None:
            pulumi.set(__self__, "decryption_rule_type", decryption_rule_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if inspection_certificate_id is not None:
            pulumi.set(__self__, "inspection_certificate_id", inspection_certificate_id)
        if logging_enabled is not None:
            pulumi.set(__self__, "logging_enabled", logging_enabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if negate_destination is not None:
            pulumi.set(__self__, "negate_destination", negate_destination)
        if negate_source is not None:
            pulumi.set(__self__, "negate_source", negate_source)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if protocol_ports is not None:
            pulumi.set(__self__, "protocol_ports", protocol_ports)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # Generated accessor pairs: each property reads/writes the input bag via
    # pulumi.get / pulumi.set; pulumi.getter(name=...) supplies the camelCase
    # wire name where it differs from the Python attribute name.
    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """
        The action to take on the rule being triggered.
        """
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def applications(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Specifies a list of Applications.
        """
        return pulumi.get(self, "applications")
    @applications.setter
    def applications(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "applications", value)
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Input['LocalRulestackRuleDestinationArgs']:
        """
        One or more `destination` blocks as defined below.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: pulumi.Input['LocalRulestackRuleDestinationArgs']):
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter
    def priority(self) -> pulumi.Input[int]:
        """
        The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        > **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: pulumi.Input[int]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter(name="rulestackId")
    def rulestack_id(self) -> pulumi.Input[str]:
        """
        The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        """
        return pulumi.get(self, "rulestack_id")
    @rulestack_id.setter
    def rulestack_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "rulestack_id", value)
    @property
    @pulumi.getter
    def source(self) -> pulumi.Input['LocalRulestackRuleSourceArgs']:
        """
        One or more `source` blocks as defined below.
        """
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: pulumi.Input['LocalRulestackRuleSourceArgs']):
        pulumi.set(self, "source", value)
    @property
    @pulumi.getter(name="auditComment")
    def audit_comment(self) -> Optional[pulumi.Input[str]]:
        """
        The comment for Audit purposes.
        """
        return pulumi.get(self, "audit_comment")
    @audit_comment.setter
    def audit_comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "audit_comment", value)
    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']]:
        """
        A `category` block as defined below.
        """
        return pulumi.get(self, "category")
    @category.setter
    def category(self, value: Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']]):
        pulumi.set(self, "category", value)
    @property
    @pulumi.getter(name="decryptionRuleType")
    def decryption_rule_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
        """
        return pulumi.get(self, "decryption_rule_type")
    @decryption_rule_type.setter
    def decryption_rule_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "decryption_rule_type", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description for the rule.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Should this Rule be enabled? Defaults to `true`.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="inspectionCertificateId")
    def inspection_certificate_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
        """
        return pulumi.get(self, "inspection_certificate_id")
    @inspection_certificate_id.setter
    def inspection_certificate_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "inspection_certificate_id", value)
    @property
    @pulumi.getter(name="loggingEnabled")
    def logging_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Should Logging be enabled? Defaults to `false`.
        """
        return pulumi.get(self, "logging_enabled")
    @logging_enabled.setter
    def logging_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "logging_enabled", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Palo Alto Local Rulestack Rule.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="negateDestination")
    def negate_destination(self) -> Optional[pulumi.Input[bool]]:
        """
        Should the inverse of the Destination configuration be used. Defaults to `false`.
        """
        return pulumi.get(self, "negate_destination")
    @negate_destination.setter
    def negate_destination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "negate_destination", value)
    @property
    @pulumi.getter(name="negateSource")
    def negate_source(self) -> Optional[pulumi.Input[bool]]:
        """
        Should the inverse of the Source configuration be used. Defaults to `false`.
        """
        return pulumi.get(self, "negate_source")
    @negate_source.setter
    def negate_source(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "negate_source", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="protocolPorts")
    def protocol_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
        """
        return pulumi.get(self, "protocol_ports")
    @protocol_ports.setter
    def protocol_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "protocol_ports", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _LocalRulestackRuleState:
    # tfgen-generated state type: every field is optional because this shape
    # is used for looking up / filtering existing LocalRulestackRule resources.
    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 audit_comment: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']] = None,
                 decryption_rule_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input['LocalRulestackRuleDestinationArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 inspection_certificate_id: Optional[pulumi.Input[str]] = None,
                 logging_enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 negate_destination: Optional[pulumi.Input[bool]] = None,
                 negate_source: Optional[pulumi.Input[bool]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 protocol_ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 rulestack_id: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input['LocalRulestackRuleSourceArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering LocalRulestackRule resources.
        :param pulumi.Input[str] action: The action to take on the rule being triggered.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] applications: Specifies a list of Applications.
        :param pulumi.Input[str] audit_comment: The comment for Audit purposes.
        :param pulumi.Input['LocalRulestackRuleCategoryArgs'] category: A `category` block as defined below.
        :param pulumi.Input[str] decryption_rule_type: The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
        :param pulumi.Input[str] description: The description for the rule.
        :param pulumi.Input['LocalRulestackRuleDestinationArgs'] destination: One or more `destination` blocks as defined below.
        :param pulumi.Input[bool] enabled: Should this Rule be enabled? Defaults to `true`.
        :param pulumi.Input[str] inspection_certificate_id: The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
        :param pulumi.Input[bool] logging_enabled: Should Logging be enabled? Defaults to `false`.
        :param pulumi.Input[str] name: The name which should be used for this Palo Alto Local Rulestack Rule.
        :param pulumi.Input[bool] negate_destination: Should the inverse of the Destination configuration be used. Defaults to `false`.
        :param pulumi.Input[bool] negate_source: Should the inverse of the Source configuration be used. Defaults to `false`.
        :param pulumi.Input[int] priority: The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
               > **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
        :param pulumi.Input[str] protocol: The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] protocol_ports: Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
        :param pulumi.Input[str] rulestack_id: The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        :param pulumi.Input['LocalRulestackRuleSourceArgs'] source: One or more `source` blocks as defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
        """
        # All state inputs are optional; record only those actually supplied,
        # so unset values stay absent rather than being stored as None.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if applications is not None:
            pulumi.set(__self__, "applications", applications)
        if audit_comment is not None:
            pulumi.set(__self__, "audit_comment", audit_comment)
        if category is not None:
            pulumi.set(__self__, "category", category)
        if decryption_rule_type is not None:
            pulumi.set(__self__, "decryption_rule_type", decryption_rule_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if destination is not None:
            pulumi.set(__self__, "destination", destination)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if inspection_certificate_id is not None:
            pulumi.set(__self__, "inspection_certificate_id", inspection_certificate_id)
        if logging_enabled is not None:
            pulumi.set(__self__, "logging_enabled", logging_enabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if negate_destination is not None:
            pulumi.set(__self__, "negate_destination", negate_destination)
        if negate_source is not None:
            pulumi.set(__self__, "negate_source", negate_source)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if protocol_ports is not None:
            pulumi.set(__self__, "protocol_ports", protocol_ports)
        if rulestack_id is not None:
            pulumi.set(__self__, "rulestack_id", rulestack_id)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # Generated accessor pairs: each property reads/writes the input bag via
    # pulumi.get / pulumi.set; pulumi.getter(name=...) supplies the camelCase
    # wire name where it differs from the Python attribute name.
    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action to take on the rule being triggered.
        """
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of Applications.
        """
        return pulumi.get(self, "applications")
    @applications.setter
    def applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "applications", value)
    @property
    @pulumi.getter(name="auditComment")
    def audit_comment(self) -> Optional[pulumi.Input[str]]:
        """
        The comment for Audit purposes.
        """
        return pulumi.get(self, "audit_comment")
    @audit_comment.setter
    def audit_comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "audit_comment", value)
    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']]:
        """
        A `category` block as defined below.
        """
        return pulumi.get(self, "category")
    @category.setter
    def category(self, value: Optional[pulumi.Input['LocalRulestackRuleCategoryArgs']]):
        pulumi.set(self, "category", value)
    @property
    @pulumi.getter(name="decryptionRuleType")
    def decryption_rule_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
        """
        return pulumi.get(self, "decryption_rule_type")
    @decryption_rule_type.setter
    def decryption_rule_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "decryption_rule_type", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description for the rule.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def destination(self) -> Optional[pulumi.Input['LocalRulestackRuleDestinationArgs']]:
        """
        One or more `destination` blocks as defined below.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: Optional[pulumi.Input['LocalRulestackRuleDestinationArgs']]):
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Should this Rule be enabled? Defaults to `true`.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="inspectionCertificateId")
    def inspection_certificate_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
        """
        return pulumi.get(self, "inspection_certificate_id")
    @inspection_certificate_id.setter
    def inspection_certificate_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "inspection_certificate_id", value)
    @property
    @pulumi.getter(name="loggingEnabled")
    def logging_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Should Logging be enabled? Defaults to `false`.
        """
        return pulumi.get(self, "logging_enabled")
    @logging_enabled.setter
    def logging_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "logging_enabled", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Palo Alto Local Rulestack Rule.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="negateDestination")
    def negate_destination(self) -> Optional[pulumi.Input[bool]]:
        """
        Should the inverse of the Destination configuration be used. Defaults to `false`.
        """
        return pulumi.get(self, "negate_destination")
    @negate_destination.setter
    def negate_destination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "negate_destination", value)
    @property
    @pulumi.getter(name="negateSource")
    def negate_source(self) -> Optional[pulumi.Input[bool]]:
        """
        Should the inverse of the Source configuration be used. Defaults to `false`.
        """
        return pulumi.get(self, "negate_source")
    @negate_source.setter
    def negate_source(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "negate_source", value)
    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        > **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="protocolPorts")
    def protocol_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
        """
        return pulumi.get(self, "protocol_ports")
    @protocol_ports.setter
    def protocol_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "protocol_ports", value)
    @property
    @pulumi.getter(name="rulestackId")
    def rulestack_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
        """
        return pulumi.get(self, "rulestack_id")
    @rulestack_id.setter
    def rulestack_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rulestack_id", value)
    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input['LocalRulestackRuleSourceArgs']]:
        """
        One or more `source` blocks as defined below.
        """
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: Optional[pulumi.Input['LocalRulestackRuleSourceArgs']]):
        pulumi.set(self, "source", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class LocalRulestackRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
audit_comment: Optional[pulumi.Input[str]] = None,
category: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleCategoryArgs']]] = None,
decryption_rule_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleDestinationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
inspection_certificate_id: Optional[pulumi.Input[str]] = None,
logging_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
negate_destination: Optional[pulumi.Input[bool]] = None,
negate_source: Optional[pulumi.Input[bool]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
protocol_ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rulestack_id: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Palo Alto Local Rulestack Rule.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_local_rulestack = azure.paloalto.LocalRulestack("exampleLocalRulestack",
resource_group_name=example_resource_group.name,
location=example_resource_group.location)
example_local_rulestack_rule = azure.paloalto.LocalRulestackRule("exampleLocalRulestackRule",
rulestack_id=example_local_rulestack.id,
priority=1000,
action="Allow",
applications=["any"],
source=azure.paloalto.LocalRulestackRuleSourceArgs(
cidrs=["10.0.0.0/8"],
),
destination=azure.paloalto.LocalRulestackRuleDestinationArgs(
cidrs=["192.168.16.0/24"],
))
```
## Import
Palo Alto Local Rulestack Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:paloalto/localRulestackRule:LocalRulestackRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/myLocalRulestack/localRules/myRule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The action to take on the rule being triggered.
:param pulumi.Input[Sequence[pulumi.Input[str]]] applications: Specifies a list of Applications.
:param pulumi.Input[str] audit_comment: The comment for Audit purposes.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleCategoryArgs']] category: A `category` block as defined below.
:param pulumi.Input[str] decryption_rule_type: The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
:param pulumi.Input[str] description: The description for the rule.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleDestinationArgs']] destination: One or more `destination` blocks as defined below.
:param pulumi.Input[bool] enabled: Should this Rule be enabled? Defaults to `true`.
:param pulumi.Input[str] inspection_certificate_id: The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
:param pulumi.Input[bool] logging_enabled: Should Logging be enabled? Defaults to `false`.
:param pulumi.Input[str] name: The name which should be used for this Palo Alto Local Rulestack Rule.
:param pulumi.Input[bool] negate_destination: Should the inverse of the Destination configuration be used. Defaults to `false`.
:param pulumi.Input[bool] negate_source: Should the inverse of the Source configuration be used. Defaults to `false`.
:param pulumi.Input[int] priority: The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
> **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
:param pulumi.Input[str] protocol: The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] protocol_ports: Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
:param pulumi.Input[str] rulestack_id: The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleSourceArgs']] source: One or more `source` blocks as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LocalRulestackRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Palo Alto Local Rulestack Rule.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_local_rulestack = azure.paloalto.LocalRulestack("exampleLocalRulestack",
resource_group_name=example_resource_group.name,
location=example_resource_group.location)
example_local_rulestack_rule = azure.paloalto.LocalRulestackRule("exampleLocalRulestackRule",
rulestack_id=example_local_rulestack.id,
priority=1000,
action="Allow",
applications=["any"],
source=azure.paloalto.LocalRulestackRuleSourceArgs(
cidrs=["10.0.0.0/8"],
),
destination=azure.paloalto.LocalRulestackRuleDestinationArgs(
cidrs=["192.168.16.0/24"],
))
```
## Import
Palo Alto Local Rulestack Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:paloalto/localRulestackRule:LocalRulestackRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/myLocalRulestack/localRules/myRule1
```
:param str resource_name: The name of the resource.
:param LocalRulestackRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LocalRulestackRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
audit_comment: Optional[pulumi.Input[str]] = None,
category: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleCategoryArgs']]] = None,
decryption_rule_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleDestinationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
inspection_certificate_id: Optional[pulumi.Input[str]] = None,
logging_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
negate_destination: Optional[pulumi.Input[bool]] = None,
negate_source: Optional[pulumi.Input[bool]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
protocol_ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rulestack_id: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LocalRulestackRuleArgs.__new__(LocalRulestackRuleArgs)
if action is None and not opts.urn:
raise TypeError("Missing required property 'action'")
__props__.__dict__["action"] = action
if applications is None and not opts.urn:
raise TypeError("Missing required property 'applications'")
__props__.__dict__["applications"] = applications
__props__.__dict__["audit_comment"] = audit_comment
__props__.__dict__["category"] = category
__props__.__dict__["decryption_rule_type"] = decryption_rule_type
__props__.__dict__["description"] = description
if destination is None and not opts.urn:
raise TypeError("Missing required property 'destination'")
__props__.__dict__["destination"] = destination
__props__.__dict__["enabled"] = enabled
__props__.__dict__["inspection_certificate_id"] = inspection_certificate_id
__props__.__dict__["logging_enabled"] = logging_enabled
__props__.__dict__["name"] = name
__props__.__dict__["negate_destination"] = negate_destination
__props__.__dict__["negate_source"] = negate_source
if priority is None and not opts.urn:
raise TypeError("Missing required property 'priority'")
__props__.__dict__["priority"] = priority
__props__.__dict__["protocol"] = protocol
__props__.__dict__["protocol_ports"] = protocol_ports
if rulestack_id is None and not opts.urn:
raise TypeError("Missing required property 'rulestack_id'")
__props__.__dict__["rulestack_id"] = rulestack_id
if source is None and not opts.urn:
raise TypeError("Missing required property 'source'")
__props__.__dict__["source"] = source
__props__.__dict__["tags"] = tags
super(LocalRulestackRule, __self__).__init__(
'azure:paloalto/localRulestackRule:LocalRulestackRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
audit_comment: Optional[pulumi.Input[str]] = None,
category: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleCategoryArgs']]] = None,
decryption_rule_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleDestinationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
inspection_certificate_id: Optional[pulumi.Input[str]] = None,
logging_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
negate_destination: Optional[pulumi.Input[bool]] = None,
negate_source: Optional[pulumi.Input[bool]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
protocol_ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rulestack_id: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['LocalRulestackRuleSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LocalRulestackRule':
"""
Get an existing LocalRulestackRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The action to take on the rule being triggered.
:param pulumi.Input[Sequence[pulumi.Input[str]]] applications: Specifies a list of Applications.
:param pulumi.Input[str] audit_comment: The comment for Audit purposes.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleCategoryArgs']] category: A `category` block as defined below.
:param pulumi.Input[str] decryption_rule_type: The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
:param pulumi.Input[str] description: The description for the rule.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleDestinationArgs']] destination: One or more `destination` blocks as defined below.
:param pulumi.Input[bool] enabled: Should this Rule be enabled? Defaults to `true`.
:param pulumi.Input[str] inspection_certificate_id: The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
:param pulumi.Input[bool] logging_enabled: Should Logging be enabled? Defaults to `false`.
:param pulumi.Input[str] name: The name which should be used for this Palo Alto Local Rulestack Rule.
:param pulumi.Input[bool] negate_destination: Should the inverse of the Destination configuration be used. Defaults to `false`.
:param pulumi.Input[bool] negate_source: Should the inverse of the Source configuration be used. Defaults to `false`.
:param pulumi.Input[int] priority: The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
> **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
:param pulumi.Input[str] protocol: The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] protocol_ports: Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
:param pulumi.Input[str] rulestack_id: The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
:param pulumi.Input[pulumi.InputType['LocalRulestackRuleSourceArgs']] source: One or more `source` blocks as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LocalRulestackRuleState.__new__(_LocalRulestackRuleState)
__props__.__dict__["action"] = action
__props__.__dict__["applications"] = applications
__props__.__dict__["audit_comment"] = audit_comment
__props__.__dict__["category"] = category
__props__.__dict__["decryption_rule_type"] = decryption_rule_type
__props__.__dict__["description"] = description
__props__.__dict__["destination"] = destination
__props__.__dict__["enabled"] = enabled
__props__.__dict__["inspection_certificate_id"] = inspection_certificate_id
__props__.__dict__["logging_enabled"] = logging_enabled
__props__.__dict__["name"] = name
__props__.__dict__["negate_destination"] = negate_destination
__props__.__dict__["negate_source"] = negate_source
__props__.__dict__["priority"] = priority
__props__.__dict__["protocol"] = protocol
__props__.__dict__["protocol_ports"] = protocol_ports
__props__.__dict__["rulestack_id"] = rulestack_id
__props__.__dict__["source"] = source
__props__.__dict__["tags"] = tags
return LocalRulestackRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[str]:
"""
The action to take on the rule being triggered.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def applications(self) -> pulumi.Output[Sequence[str]]:
"""
Specifies a list of Applications.
"""
return pulumi.get(self, "applications")
@property
@pulumi.getter(name="auditComment")
def audit_comment(self) -> pulumi.Output[Optional[str]]:
"""
The comment for Audit purposes.
"""
return pulumi.get(self, "audit_comment")
@property
@pulumi.getter
def category(self) -> pulumi.Output[Optional['outputs.LocalRulestackRuleCategory']]:
"""
A `category` block as defined below.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter(name="decryptionRuleType")
def decryption_rule_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of Decryption to perform on the rule. Possible values include `SSLInboundInspection`, `SSLOutboundInspection`, and `None` Defaults to `None`.
"""
return pulumi.get(self, "decryption_rule_type")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description for the rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destination(self) -> pulumi.Output['outputs.LocalRulestackRuleDestination']:
"""
One or more `destination` blocks as defined below.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should this Rule be enabled? Defaults to `true`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="inspectionCertificateId")
def inspection_certificate_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the certificate for inbound inspection. Only valid when `decryption_rule_type` is set to `SSLInboundInspection`.
"""
return pulumi.get(self, "inspection_certificate_id")
@property
@pulumi.getter(name="loggingEnabled")
def logging_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should Logging be enabled? Defaults to `false`.
"""
return pulumi.get(self, "logging_enabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Palo Alto Local Rulestack Rule.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="negateDestination")
def negate_destination(self) -> pulumi.Output[Optional[bool]]:
"""
Should the inverse of the Destination configuration be used. Defaults to `false`.
"""
return pulumi.get(self, "negate_destination")
@property
@pulumi.getter(name="negateSource")
def negate_source(self) -> pulumi.Output[Optional[bool]]:
"""
Should the inverse of the Source configuration be used. Defaults to `false`.
"""
return pulumi.get(self, "negate_source")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[int]:
"""
The Priority of this rule. Rules are executed in numerical order. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
> **NOTE:** This is the primary identifier of a rule, as such it is not possible to change the Priority of a rule once created.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[Optional[str]]:
"""
The Protocol and port to use in the form `[protocol]:[port_number]` e.g. `TCP:8080` or `UDP:53`. Conflicts with `protocol_ports`. Defaults to `application-default`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="protocolPorts")
def protocol_ports(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Specifies a list of Protocol:Port entries. E.g. `[ "TCP:80", "UDP:5431" ]`. Conflicts with `protocol`.
"""
return pulumi.get(self, "protocol_ports")
@property
@pulumi.getter(name="rulestackId")
def rulestack_id(self) -> pulumi.Output[str]:
"""
The ID of the Local Rulestack in which to create this Rule. Changing this forces a new Palo Alto Local Rulestack Rule to be created.
"""
return pulumi.get(self, "rulestack_id")
@property
@pulumi.getter
def source(self) -> pulumi.Output['outputs.LocalRulestackRuleSource']:
"""
One or more `source` blocks as defined below.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags which should be assigned to the Palo Alto Local Rulestack Rule.
"""
return pulumi.get(self, "tags")
| UTF-8 | Python | false | false | 52,081 | py | 11,864 | local_rulestack_rule.py | 8,604 | 0.641578 | 0.637833 | 0 | 1,074 | 47.49162 | 250 |
D6C92FE5/oucfeed.server | 2,001,454,796,483 | dc1540aea2f8916634f8168c7318b00a3457bbb8 | cb01850ea1a138ce13e4e3e27abeb3085aad7531 | /oucfeed/server/db/__init__.py | e2e83d12cd45672ccc461bf7113975f38ff01c98 | [
"MIT"
]
| permissive | https://github.com/D6C92FE5/oucfeed.server | 92ebaffd52929e84006a75eff962999be7e92f2e | ba9d64c71a1e8b20a5db34c6d4d61500e05a92a4 | refs/heads/master | 2020-12-24T13:28:42.316276 | 2014-05-02T03:12:10 | 2014-05-02T03:12:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
try:
from google.appengine.ext import ndb
except:
from oucfeed.server.db.sqlite import *
else:
from oucfeed.server.db.gae import *
| UTF-8 | Python | false | false | 253 | py | 25 | __init__.py | 19 | 0.70751 | 0.703557 | 0 | 11 | 22 | 82 |
zhenglingl/sass | 13,615,046,370,594 | 83239449c4101f5f05a7226c147c9c3123e0a79f | 33dca20901f90bfbe57c0c35292c8ec7f6aa4048 | /web/views/account.py | 4db62bb6d9e80290a42ada84358812e175ec156c | []
| no_license | https://github.com/zhenglingl/sass | b66dcadebe1d713e0fa2ef16266c3e111f2e7aa4 | bcfbc3d4f4fc9f3f6dd6251323a98326d17e9741 | refs/heads/master | 2023-05-26T18:04:24.751567 | 2020-07-10T11:45:54 | 2020-07-10T11:45:54 | 278,227,833 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
用户账户相关功能:注册、短信、登录、注销
"""
from django.shortcuts import render
from web.forms.account import RegisterModeForm
def register(request):
# 实例化modelform
form = RegisterModeForm()
return render(request,'register.html',{'form':form}) | UTF-8 | Python | false | false | 286 | py | 3 | account.py | 3 | 0.7375 | 0.7375 | 0 | 9 | 25.777778 | 56 |
elim723/ezfit | 7,687,991,470,651 | b0af854814fce1eda0e85b27dd91b4118fd5125f | f461bdf150abdd7f02df2dd6097d10f838ac6fad | /likelihood.py | baa278a3c748f4cca4e9b94f19eb06b0d0ebb12b | []
| no_license | https://github.com/elim723/ezfit | 35280645700018cf1c3668644b7888a5e67e01e0 | b4ca0eb7f88fb9f844bb66767807e383b1de092d | refs/heads/master | 2020-03-25T05:51:20.764704 | 2018-09-10T01:54:59 | 2018-09-10T01:54:59 | 143,469,107 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
####
#### Originally by Michael Larson
#### Modified by Elim Cheung (07/24/2018)
####
#### This script contains the likelihood class. One can
#### choose regular / modified chi2 or poisson / barlow
#### LLH.
####
#### More info on BarlowLLH:
#### http://lss.fnal.gov/archive/other/man-hep-93-1.pdf
####
####################################################################
from __future__ import print_function
from scipy.optimize import minimize, fsolve
import numpy as np
from misc import Map, Toolbox, InvalidArguments
####################################################################
#### constants needed
####################################################################
bins_line = 4 ## need to change _print_line if bins_line is changed
toolbox = Toolbox ()
####################################################################
#### Likelihood class
####################################################################
class Likelihood (object):
''' Likelihood object is a class that perform likelihood
calculation. One can pick between regular / modified
chi2 or poisson / barlow LLH. '''
def __init__ (self, dhisto, method, verbose=1):
''' initialize likelihood object
:type dhisto: dictionary
:param dhisto: data histogram {'H':[], 'H2':[]}
:type method: string
:param method: 'barlow', 'poisson', 'chi2', 'modchi2'
:type verbose: int
:param verbose: If 0, no print out
If 1, basic print out
If 4, detailed print out per bin
'''
self._dhisto = dhisto
self._method = method
self._verbose = verbose
self._shape, self._nbins = self._set_params ()
self._dhisto = Map ({ 'H':self._dhisto.H.flatten (),
'H2':self._dhisto.H2.flatten () })
def __getstate__ (self):
''' get state for pickling '''
return self.__dict__
def __setstate__ (self, d):
''' set state for pickling '''
self.__dict__ = d
def _print_header (self, shape, nbins):
''' print header
:type shape: tuple
:param shape: histogram shape
:type nbins: int
:param nbins: total number of bins of histogram
'''
print ('#### ##################################################')
print ('#### ############### Likelihood Set up ################')
print ('####')
if self._verbose > 1:
print ('#### histogram info : {0} ({1})'.format (shape, nbins))
def _print_info (self, histos, shape):
''' print bin information
:type histos: dictionary
:param histos: {'numucc': {'H':[], 'H2':[]},
'nuecc' : {'H':[], 'H2':[]}, ...}
:type shape: tuple
:param shape: histogram shape
'''
print ('#### data types included ({0}): {1}'.format (len (histos), histos.keys ()))
print ('#### MC / data content:')
print ('#### {0}'.format ('-'*88))
totalmc = self._get_totalmc (histos)
### print every four bins per print out line
for i in np.arange (len (totalmc.H.flatten ())/bins_line):
info = self._get_bininfo (i, totalmc, shape)
# print each four bins
self._print_bin (info)
print ('#### {0}'.format ('-'*88))
print ('####')
def _print_bin (self, info):
''' print the bin information
:type info: a dictionary
:param info: information of the bins in bins_line
'''
line1, line2, line3 = '#### ', '#### ', '#### '
for i in np.arange (bins_line):
line1 += '{'+str(i*2)+':3}th {'+str(i*2+1)+'}' + ' '*6 + '|'
line2 += 'mc: {'+str(i*2)+':6.1f}; d: {'+str(i*2+1)+':6.1f}|'
line3 += 'chi2: {'+str(i)+':7.2f}' + ' '*8 + '|'
print (line1.format (info['nbin'][0], info['index'][0],
info['nbin'][1], info['index'][1],
info['nbin'][2], info['index'][2],
info['nbin'][3], info['index'][3] ))
print (line2.format (info['mc'][0], info['data'][0],
info['mc'][1], info['data'][1],
info['mc'][2], info['data'][2],
info['mc'][3], info['data'][3] ))
print (line3.format (info['chi2'][0], info['chi2'][1],
info['chi2'][2], info['chi2'][3]))
def _print_barlow (self):
''' print initial barlow setting '''
print ('#### init ps: {0}'.format(self._ps))
print ('#### init norms: {0}'.format(self._norms))
if self._verbose > 3:
line = '#### N{0}, init weighted, init meanw * A {0}: {1}, {2}, {3}'
for j in np.arange (len (self._dtypes)):
print (line.format(j, self._Nj[j], np.sum (self._ohistos[j]),
np.sum (self._meanw[j] * self._unohistos[j]) ))
def _set_params (self):
''' set internal parameters and print info
:return shape: tuple
shape: histogram shape
:return nbins: int
nbins: total number of bins of histogram
'''
shape = self._dhisto.H.shape
nbins = len (self._dhisto.H.flatten())
self._print_header (shape, nbins)
return shape, nbins
def set_histos (self, histos):
''' set the current histograms from all data types
:type histos: dictionary
:param histos: {'numucc': {'H':[], 'H2':[]},
'nuecc' : {'H':[], 'H2':[]}, ...}
'''
## change self._histos from dictionary to flattened numpy array
self._histos = histos
self._dtypes = sorted ([ dtype for dtype in self._histos ])
self._ohistos, self._vhistos = self._order_histos (self._histos)
if self._verbose > 3: self._print_info (histos, self._shape)
def set_barlow (self, unhistos, norms):
''' set unweighted histograms for barlow
:type unhistos: dictionary
:param unhistos: unweighted histograms {'numucc': {'H':[], 'H2':[]},
'nuecc' : {'H':[], 'H2':[]},
...}
:type norm: a dictionary
:param norm: normalization factors {'numucc':x, 'nuecc':x,
'nutaucc':x, ...}
'''
## change self._unhistos/self._norms from dictionary to numpy array
self._unohistos, self._unvhistos = self._order_histos (unhistos)
self._norms = np.array ([ norms[dtype] for dtype in self._dtypes ])
## these variables are based on barlow paper
self._Nj = np.array ([ np.sum (self._ohistos[j] / self._norms[j])
for j in np.arange (self._ndtypes) ]).astype (float)
self._ps = np.array ([ self._norms[j] * np.sum (self._ohistos[j]) / self._Nj[j]
for j in np.arange (self._ndtypes) ])
self._meanw = self._get_meanw ()
## print init info
if self._verbose > 3: self._print_barlow ()
return
def _order_histos (self, histos):
''' order histograms in order of self._dtypes
:type histos: dictionary
:param histos: {'numucc': {'H':[], 'H2':[]},
'nuecc' : {'H':[], 'H2':[]}, ...}
:return ohistos: a multi-dimensional array
ohistos: flattened histograms in order of self._dtypes
:return vhistos: a multi-dimensional array
vhistos: flattened variances in order of self._dtypes
'''
ohistos, vhistos = [], []
for dtype in self._dtypes:
ohistos.append (histos[dtype]['H'].flatten ())
vhistos.append (histos[dtype]['H2'].flatten ())
return np.array (ohistos), np.array (vhistos)
def _get_bininfo (self, index, totalmc, shape):
''' obtain informations from every four bins_line
:type index: int
:param index: the bin index in the flattened histogram
:type totalmc: a dictionary
:param totalmc: total mc dictionary {'H':[],'H2':[]}
:type shape: tuple
:param shape: histogram shape
:return info: a dictionary
info: information of the bins in bins_line
'''
# get information about these four bins
info = {'nbin':[], 'index':[], 'mc':[], 'data':[], 'chi2':[]}
nbins = np.linspace (index*bins_line,
index*bins_line+bins_line-1,
bins_line).astype (int)
for nbin in nbins:
index = np.unravel_index (nbin, shape)
mc = totalmc.H.flatten ()[nbin]
mc2 = totalmc.H2.flatten ()[nbin]
data, data2 = self._dhisto.H[nbin], self._dhisto.H2[nbin]
chi2 = (mc - data)**2 / (mc2 + data2)
info['nbin'].append (nbin)
info['index'].append (index)
info['mc'].append (round (mc, 1))
info['data'].append (round (data, 1))
info['chi2'].append (round (chi2, 2))
return info
    def _get_meanw (self):
        ''' get averaged mean weights per bin

            For each data type j the per-bin mean weight is the weighted
            bin content (normalization factor divided out) over the raw
            (unweighted) bin count; empty bins are back-filled with the
            smallest non-zero mean weight of that data type.
            See the Barlow paper (MAN/HEP/93-1) for why this is needed.

            :return weighted_unweighted: a multi-dimensional array
                    weighted_unweighted: mean weight per bin per data type
                                         (n data types x n flattened bins)
        '''
        ## weighted / unweighted histograms (excluding normalization factors)
        weighted = np.array ([ self._ohistos[j] / self._norms[j]
                               for j in np.arange (self._ndtypes) ])
        ## 0/0 bins give nan; nan_to_num turns them into 0 so the
        ## back-filling below can pick them up
        weighted_unweighted = np.nan_to_num (weighted/self._unohistos)
        ## massage empty bins
        for j in np.arange (self._ndtypes):
            ## bins of this data type whose mean weight came out as 0
            indicies = weighted_unweighted[j]==0
            ## back-filling is only needed when at least one bin is empty
            if not np.sum(indicies)==0:
                ## mean weights of the non-empty bins of this data type
                m = weighted_unweighted[j][np.logical_not (indicies)]
                # Weird case: every bin has a total weight of 0. This
                # can happen if you have 0 of one of the neutrino
                # types [eg, taus]
                if np.sum(m)==0: continue
                ## fill empty bins with the smallest non-zero mean weight
                weighted_unweighted[j][indicies] = np.min (m)
        return weighted_unweighted
def _get_totalmc (self, histos):
''' get total mc from histograms of all data types
:type histos: dictionary
:param histos: {'numucc': {'H':[], 'H2':[]},
'nuecc' : {'H':[], 'H2':[]}, ...}
:return totalmc: a dictionary
totalmc: total MC {'H':[], 'H2':[]}
'''
mc = Map ({'H':np.zeros (self._shape), 'H2':np.zeros (self._shape)})
for i, dtype in enumerate (self._dtypes):
mc['H'] += histos[dtype]['H']
mc['H2'] += histos[dtype]['H2']
return mc
def get_ts (self):
''' calculate ts values from all bins
test statistics (TS) is either chi2 / 2. or LLH value
:return totalTS: float
totalTS: total ts from all bins
:return binTS: a multi-dimensional array
binTS: ts value per bin (histogram shape)
:return As: multi-dimensional array
As: fitted barlow llh value
'''
## set up variables
isbarlow = True if 'barlow' in self._method else False
As = None
if isbarlow:
if any ([ self._ps[j]<0 for j in np.arange (len (self._dtypes)) ]):
return 1e10
As = np.empty (self._unohistos)
## loop through each bin
binTS, totalTS = [], 0
for nbin in np.arange (self._nbins):
ts, An = self.get_binTS (nbin)
binTS.append (ts)
totalTS += ts
if isbarlow: As[:,nbin] = An
return totalTS, np.array (binTS).reshape (self._shape), As
def get_binTS (self, nbin):
''' get the test statistics for a given bin
ts = chi2 / 2. or llh value
:type nbin: int
:param nbin: index of the flattened histogram
:return ts: float
ts: ts of this bin
:return An: 1D array
An: fitted barlow llh value for all data types
'''
## info of this bin
index = np.unravel_index (nbin, self._shape)
di = self._dhisto.H[nbin]
fi = np.sum ([ self._ohistos[j][nbin]
for j in np.arange (len (self._dtypes)) ])
## print info
#if self._verbose > 3:
# print ('#### +----------- {0}th bin ({1}) -----------+'.format (nbin,
# index))
# print ('#### +---- di, mci: {0}, {1}'.format(di, fi))
## determine TS
if 'chi2' in self._method:
## determine chi2
ts = self._calculate_chi2 (nbin, di, fi) ## chi2 / 2.
An = None
else:
## determine likelihood
ts, An = self._calculate_llh (nbin, di, fi)
## print info
#if self._verbose > 3:
# print ('#### +---- {0}: {1}'.format (self._method, 2*ts))
return ts, An
def _calculate_chi2 (self, nbin, di, fi):
''' get the regular / modified chi2 * 0.5 for a given bin
:type nbin: int
:param nbin: index of the flattened histogram
:type di: float
:param di: data count in the nth bin
:type fi: float
:param fi: total MC count in the nth bin
:return chi2: float
chi2: chi2 / 2. value
'''
## if empty bins: chi2 = 0.
if fi==0: return 0.0
if 'modchi2' in self._method:
## collect total variance from all data types
vfi = np.sum ([ self._vhistos[j][nbin]
for j in np.arange (len (self._dtypes)) ])
return 0.5 * (fi - di)**2 / (fi + vfi)
return 0.5 * (fi - di)**2 / fi
def _calculate_llh (self, nbin, di, fi):
''' get the poisson / barlow LLH value for a given bin
:type nbin: int
:param nbin: index of the flattened histogram
:type di: float
:param di: data count in the nth bin
:type fi: float
:param fi: total MC count in the nth bin
:return llh: float
llh: llh value
:return An: a 1D array
An: fitted value for Barlow LLH (length = n datatypes)
'''
if 'barlow' in self._method:
return self._calculate_barlow (nbin, di, fi)
llh = 0.
if fi > 0: llh += di * np.log (fi) - fi
if di > 0: llh -= di * np.log (di) - di
return -llh, None
def _calculate_barlow (self, nbin, di, fi):
''' get the barlow LLH value for a given nth or ith bin
-- solve for ti (Eq. 26 in Barlow's paper)
-- solve for Ai (Eq. 25 in Barlow's paper)
:type nbin: int
:param nbin: index of the flattened histogram
:type di: float
:param di: data count in the nth bin
:type fi: float
:param fi: total MC count in the nth bin
:return llh: float
llh: llh value
:return An: a 1D array
An: fitted value for Barlow LLH (length = n datatypes)
'''
## ai = unweighted counts in this bins from all data types
## wi = mean weights in this bins from all data types
ai = np.array ([ self._unohistos[j][nbin]
for j in np.arange (self._dtypes) ])
wi = np.array ([ self._meanw[j][nbin] for j in np.arange (self._dtypes) ])
## solve for ti (a scalar)
ti = self._barlow_solve_ti (ai, wi, di)
## solve for Aji (an array of N data types)
## ti may be modified if special case
ti, Ai = self._barlow_solve_Ai (ai, wi, ti)
## solve for fi for this ith bin (fi = a scalar)
fi = np.sum ([ self._ps[j]*wi[j]*Ai[j]
for j in np.arange (len (self._dtypes)) ])
## evaluate barlow LLH
llh = 0
# poisson part
if fi > 0: llh += di * np.log (fi) - fi
if di > 0: llh -= di * np.log (di) - di
# mc uncertainty penalty part
for j in np.arange (len (self._dtypes)):
if Ai[j] > 0:
llh += ai[j] * np.log (Ai[j]) - Ai[j]
llh -= ai[j] * np.log (ai[j]) - ai[j]
# print penalty
if self._verbose > 3: self._print_barlow_penalty (ai, Ai)
return -llh, Ai
def _print_barlow_penalty (self, ai, Ai):
''' print penalty
:type ai: 1D numpy array
:param ai: unweighted counts in this bin from all data types
:type Ai: 1D numpy array
:param Ai: fitted unweighted counts in this bin from all data types
'''
print ('#### ++++++++++++++++++++++++++++++++++++++++++++++++')
print ('#### +---- Penalty due to MC uncertainty')
print ('#### +---- -----------------------------------------+')
print ('#### +---- dtype | ai | Ai | penalty |')
for j in np.arange (len (self._dtypes)):
line = '#### +---- {0:6} | {1:8} | {2:8} | {3:9} |'
penalty = (ai[j]*np.log (Ai[j]) - Ai[j]) - \
(ai[j] * np.log (ai[j]) - ai[j])
print (line.format (j, np.round (ai[j],4), np.round (Ai[j], 4),
np.round (penalty, 2) ))
def _barlow_solve_ti (self, ai, wi, di):
''' solve for ti of nth bin from Eq. 26 in Barlow's paper
ti is a scaler representing the difference bewteen fi and di
:type di: float
:param di: data count in the nth or ith bin
:type wi: 1D numpy array
:param wi: mean weights in this bin from all data types
:type ai: 1D numpy array
:param ai: unweighted counts in this bin from all data types
:return ti: float
ti: checked / modified value of ti
'''
## solve ti
ti_func = lambda ti: di / (1-ti) - \
np.sum ([ self._ps[j]*ai[j]*wi[j] / (1+self._ps[j]*wi[j]*ti)
for j in np.arange (len (self._dtypes)) ])
ti = fsolve (ti_func, 0) [0]
## print info
if self._verbose > 3:
print ('#### +---- current ps : {0}'.format (self._ps))
print ('#### +---- current norms : {0}'.format (self._norms))
w = np.array ([ self._ohistos[j][nbin]
for j in np.arange (len (self._dtypes)) ])
print ('#### +---- weighted counts : {0}'.format (w))
print ('#### +---- unweighted counts: {0}'.format (ai))
print ('#### +---- mean weight : {0}'.format (wi))
print ('#### +---- ti : {0} ({1})'.format (ti,
ti_func (ti)))
## check value of ti
return self._barlow_check_ti (ti, wi)
def _barlow_check_ti (self, ti, wi):
''' check value of ti not to be smaller
than the lowest possible value
:type ti: float
:param ti: value of ti from Eq. 26 in Barlow's paper
:type wi: 1D numpy array
:param wi: mean weights in this bin from all data types
:return ti: float
ti: checked / modified value of ti
'''
## apply boundary conditions to ti according to the paper
max_pw = max ([ self._ps[j]*wi[j]
for j in np.arange (len (self._dtypes)) ])
lowest_ti = -1. / max_pw
if ti < lowest_ti:
## replace ti to the lowest possible value
ti = lowest_ti
## print new info
if self._verbose > 3:
print ('#### +---- ** max p*w : {0}'.format (max_pw))
print ('#### +---- ** max lowest ti : {0}'.format (lowest_ti))
print ('#### +---- ** new ti : {0} ({1})'.format (ti,
ti_func (ti)))
return ti
def _barlow_solve_Ai (self, ai, wi, ti):
''' solve for Ai (Eq. 25 in Barlow's paper)
:type ai: 1D numpy array
:param ai: unweighted counts in this bin from all data types
:type wi: 1D numpy array
:param wi: mean weights in this bin from all data types
:type ti: a float
:param ti: value of ti from Eq. 26 in Barlow's paper
:return ti: a float
ti: updated value if special case is met
:return Ai: a numpy array
Ai: fitted unweighted counts for all data types
'''
## Eq. 20 in Barlow's paper
Ai = np.array ([ ai[j] / (1+self._ps[j]*wi[j]*ti)
for j in np.arange (len (self._dtypes)) ])
if self._verbose > 3: print ('#### +---- Ai : {0}'.format (Ai))
## look for special case (Eq. 20 in Barlow's paper)
ks = [ j for j in np.arange (len (self._dtypes)) if ai[j]==0. and Ai[j]>0. ]
## check for special case
## both Ai and ti are modified if Ai[j] > 0 and ai[j] == 0
if len (ks) > 0:
ti, Ai = self._barlow_check_Ai (ks, wi, ai, ti, Ai)
## for any Ai < 0, LLH max happens at A_i = 0
Ai[Ai<0] = 0.0
return ti, Ai
    def _barlow_check_Ai (self, ks, wi, ai, ti, Ai):
        ''' check value of Ai
            special treatment when Ai[j]>0 and ai[j]==0

            :type ks: a list
            :param ks: indices of special case

            :type wi: 1D numpy array
            :param wi: mean weights in this bin from all data types

            :type ai: 1D numpy array
            :param ai: unweighted counts in this bin from all data types

            :type ti: a float
            :param ti: value of ti from Eq. 26 in Barlow's paper

            :type Ai: a numpy array
            :param Ai: fitted unweighted counts for all data types

            :return ti: a float
                    ti: updated value of ti

            :return Ai: a numpy array
                    Ai: updated value of Ai (Eq. 20 in Barlow's paper)
        '''
        # max p in the special cases
        pk = np.max ([ self._ps[k]*wi[k] for k in ks ])
        # index of max p*w from all data types
        maxk = np.argmax (self._ps*wi)
        # update ti
        ti = -1./pk
        # print info
        if self._verbose > 3:
            print ('#### +---- ** SPECIAL Ai[j] > 0 && ai[j] == 0 CASE !')
            print ('#### +---- ** ks            : {0}'.format (ks))
            print ('#### +---- ** pk, maxk      : {0}, {1}'.format (pk, maxk))
            print ('#### +---- ** updated ti    : {0}'.format (ti))
            print ('#### +++++++++++++++++++++++++++++++++++++')
            print ('#### +---- Update Ai')
            print ('#### +---- ------------------------------+')
            print ('#### +---- dtype | before Ai | after Ai |')
        # update Ai with updated ti (Eq. 22 in Barlow's paper)
        for j in np.arange (len (self._dtypes)):
            if j == maxk:
                # NOTE(review): `di` (the data count in this bin) is not
                # defined in this method and not among its parameters, so
                # this branch raises NameError when j == maxk.  Eq. 22 of
                # Barlow's paper does use the data count here, so the fix
                # is presumably to pass `di` in from _barlow_solve_Ai's
                # caller -- confirm against the caller before changing the
                # signature.
                newA = di / (1+pk) - np.sum ([ self._ps[m]*wi[m]*ai[m] /
                                               (pk-self._ps[m]*wi[m])
                                               for m in np.arange (len (self._dtypes))
                                               if not m==j ])
            else:
                newA = ai[j] / (1+self._ps[j]*wi[j]*ti)
            if self._verbose > 3:
                print ('#### +---- {0:5} | {1:9} | {2:8} |'.format (j, Ai[j], newA))
            Ai[j] = newA
        if self._verbose > 3:
            print ('#### +++++++++++++++++++++++++++++++++++++')
            print ('#### +---- ** updated Ai    : {0}'.format (Ai))
        return ti, Ai
| UTF-8 | Python | false | false | 24,957 | py | 13 | likelihood.py | 12 | 0.455383 | 0.444244 | 0 | 675 | 35.973333 | 91 |
DeyangWang3849/LeetcodeRepository | 19,052,474,949,166 | cfb2a6690c5c0875518bf692a4200115e0f88c11 | ebb47a8603d81846ca55ef402500fba433898b05 | /003LongestSubstring.py | 8a24c3c0dd97ac683dc15533e2d1d18b2a848b33 | []
| no_license | https://github.com/DeyangWang3849/LeetcodeRepository | 844ed304863fc92585d70fb033d8b9df274f2e09 | 5618e001ef53647a0e15222398bde90508e52cdb | refs/heads/master | 2020-05-01T05:12:48.079628 | 2019-03-24T11:52:20 | 2019-03-24T11:52:20 | 177,295,609 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def lengthOfLongestSubstring(s:str):
diction = {}
lenList = []
if s == '':
return 0
diction[s[0]]=0
lenList.append(1)
for i in range(1,len(s)):
if diction.__contains__(s[i]):
lenList.append(min(i - diction[s[i]],lenList[i-1]+1))
diction[s[i]]=i
else:
diction[s[i]]=i
lenList.append(lenList[i-1]+1)
return max(lenList)
if __name__ == '__main__':
while True:
string = input('please input string:\n')
print(lengthOfLongestSubstring(string))
| UTF-8 | Python | false | false | 560 | py | 4 | 003LongestSubstring.py | 4 | 0.528571 | 0.5125 | 0 | 21 | 25.666667 | 65 |
liqichen6688/alpha | 8,658,654,079,029 | 0075d1c8e572e0dcfa335351ff8ad02a438f8f2d | 0513fb90bf9d15f5ba6bba0f9b76595a7a297990 | /XtechAlpha.py | 2b73a7da5dba54c0c9113cf1beeb4c650ff2e764 | []
| no_license | https://github.com/liqichen6688/alpha | f62374b82dbfde4aae10c29458894c4b681b2d04 | 570ce700db4774a3adf78f56e492e7ac242c37e8 | refs/heads/master | 2020-06-05T03:04:44.778032 | 2019-06-17T07:44:44 | 2019-06-17T07:44:44 | 192,291,986 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
from scipy.stats import rankdata
def ts_max(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Rolling maximum over a window of ``d`` rows (first ``d-1`` rows are NaN)."""
    roller = x.rolling(window=d)
    return roller.max()
def ts_argmax(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Position (0-based, within the window) of the maximum over the last ``d`` rows."""
    roller = x.rolling(window=d)
    return roller.apply(np.argmax)
def ts_argmin(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Position (0-based, within the window) of the minimum over the last ``d`` rows."""
    roller = x.rolling(window=d)
    return roller.apply(np.argmin)
def ts_rank(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Rank (1-based, via scipy ``rankdata``) of the newest value inside each ``d``-row window."""
    rank_of_last = lambda window: rankdata(window)[-1]
    return x.rolling(window=d).apply(rank_of_last)
def sum(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Rolling sum over a window of ``d`` rows.

    NOTE: the name deliberately matches the original public API even though
    it shadows the ``sum`` builtin inside this module.
    """
    roller = x.rolling(window=d)
    return roller.sum()
def product(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Rolling product over a window of ``d`` rows."""
    roller = x.rolling(window=d)
    return roller.apply(np.prod)
def stddev(x: pd.core.frame.DataFrame, d: int) -> pd.core.frame.DataFrame:
    """Rolling sample standard deviation (pandas default, ddof=1) over ``d`` rows."""
    roller = x.rolling(window=d)
    return roller.std()
| UTF-8 | Python | false | false | 891 | py | 1 | XtechAlpha.py | 1 | 0.685746 | 0.684624 | 0 | 38 | 22.447368 | 77 |
andres06-hub/app_tenderos | 19,301,583,050,256 | cf4daff78c7fce6415c11b0f483f699fa6d16414 | 43319fddc5bb3f50336391d7fd06b370fe2f3d43 | /app.py | 2b21c4517eab9e8ed9f9e0703b8c6c9bd65f4733 | []
| no_license | https://github.com/andres06-hub/app_tenderos | 89b7bf87b5678c447ffbacef3743c1ea5c4c8aba | 8d1af152681c365ad32eebda5bc1ba3cf8555d85 | refs/heads/main | 2023-08-16T13:55:17.047356 | 2021-10-01T02:06:56 | 2021-10-01T02:06:56 | 410,353,185 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Se importa flask
import re
from data import usuario
from data.usuario import Usuario
from itertools import product
from flask import Flask, request, render_template, session, redirect, url_for
from flask import json
from flask.helpers import flash
from flask.json import jsonify
# # se importa la libreria bcrypt para la encriptacion
import bcrypt
# Se importa los objetos
# from usuario import Usuario
# from admin import Admin
# from data import Usuario
from data import Admin, admin
from db_user_admin import usuarios,admins,saldo_limite,usuarios_encontrados
app = Flask('tenderos')
# ..................................
simbolo_peso = "$"
# ###########################################
# RUTAS SOLAMENTE PARA MOSTRAR EN PANTALLA --> GET
# ###########################################
@app.route('/')
def interfaz_navegacion():
    """Landing page; users with an active session go straight to the workspace."""
    # NOTE(review): this checks the session key "usuarios", while the
    # commented-out login code elsewhere uses "usuario" -- confirm which
    # key is actually set when sessions are enabled.
    if "usuarios" not in session:
        return render_template('landing.html')
    return redirect(url_for('interfaz_workspace'))
@app.route('/login')
def interfaz_login():
    """Render the login form."""
    return render_template('login.html')


@app.route('/signUp')
def interfaz_signUp():
    """Render the admin sign-up form."""
    return render_template('signUp.html')


@app.route('/crear_usuario')
def interfaz_crear_usuario():
    """Render the create-user form."""
    return render_template('crear-usuario.html')


@app.route('/edit_saldo')
def interfaz_saldo():
    """Render the edit-credit-limit form."""
    return render_template('editar-saldo.html')
@app.route('/editar_usuario/<id>')
def interfaz_editar_usuario(id):
    """Render the edit form for the user whose documento equals ``id``.

    ``id`` shadows the builtin, but it is part of the route contract.
    """
    encontrado = next((usuario for usuario in usuarios
                       if usuario.documento == id), None)
    if encontrado is None:
        return {'mensaje': f'No se ha encontrado el usuario con el id {id}'}
    return render_template('edit_user.html', usuario=encontrado)
@app.route('/workspace/usuarios')
def interfaz_workspace():
    """Render the workspace listing every registered user and the credit limit.

    Cleanup: the original printed a leftover debug line and bound the user
    count to a local named ``long`` (shadowing the builtin).
    TODO(review): the session check is commented out, so this route is
    currently reachable without logging in.
    """
    total = len(usuarios)
    # current global credit limit (single-element list used as mutable state)
    saldo = saldo_limite[0]
    simbolo_moneda = simbolo_peso
    return render_template('workspace.html', usuarios=usuarios,
                           longitud_usuarios=total,
                           cuantos_usuarios_registrados=total,
                           simbolo_moneda=simbolo_moneda, saldo=saldo)
# @app.route('/')
# def __():
# return
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# INTERFACES DE ERRORES
@app.route('/error_login')
def interfaz_login_incorrecto():
    """Error page for a failed login."""
    return render_template('login-incorrecto.html')


@app.route('/ya_exite')
def interfaz_ya_existe():
    """Error page shown when the admin already exists.

    The misspelled URL ('/ya_exite') is kept: it is the public route.
    """
    return render_template('ya-existe-admin.html')


@app.route('/EROOR-404')
def interfaz_no_encontrado():
    """Generic not-found page (the misspelled URL is the public route)."""
    return render_template('no-encontrado.html')
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# #################################
# INTERFACES CORRECTAS
@app.route('/logueado')
def interfaz_login_correcto():
    """Confirmation page for a successful login."""
    return render_template('logueado.html')


@app.route('/registro-exitoso')
def interfaz_registro_exitoso():
    """Confirmation page for a successful registration."""
    return render_template('registro-exitoso.html')
# # #######################################
# Interfaces para los usuarios
@app.route('/usuario-registrado')
def interfaz_usuario_ya_registrado():
    """Page warning that the user is already registered."""
    return render_template('ya_registrado_usuario.html')


@app.route('/usuarios_encontrados')
def interfaz_usu_encontrados():
    """List every user collected in ``usuarios_encontrados``."""
    cantidad = len(usuarios_encontrados)
    return render_template('usuarios-encontrados.html',
                           usuarios_encontrados=usuarios_encontrados,
                           cantidad_usuarios=cantidad)
# ##########################################
@app.route('/buscar_usuario', methods=['POST'])
def get_usuario_buscado():
    """Search a user by documento (posted in the ``buscar`` form field).

    BUGFIX: the original had an ``else`` branch *inside* the loop, so it
    answered "not found" after inspecting only the first user, and it
    returned ``None`` (HTTP 500) when the user list was empty.  Now every
    user is checked before falling back to the not-found redirect.  The
    leftover debug ``print`` was removed.
    """
    documento_usuario = request.form.get("buscar")
    for usuario in usuarios:
        if usuario.documento == documento_usuario:
            mensaje = f"USUARIO ENCONTRADO! DOCUMENTO:{usuario.documento} - NOMBRES:{usuario.nombres} - MOVIL:{usuario.movil} - SALDO:{usuario.saldo}"
            return {'message': mensaje}
    return redirect(url_for('interfaz_usu_encontrados'))
@app.route('/login', methods=['POST'])
def get_login():
    """Validate admin credentials posted from the login form.

    BUGFIX: the original returned the login-error redirect from inside the
    loop, so any admin other than the first one could never log in.  Every
    registered admin is now checked before rejecting the credentials.
    NOTE(review): passwords are compared in plain text even though bcrypt
    is imported -- hash on registration and verify here.
    """
    email = request.form.get("email")
    password = request.form.get("password")
    for admin in admins:
        if email == admin.correo and password == admin.password:
            # TODO(review): session cookie creation is still pending (the
            # original left ``session['usuario'] = email`` commented out)
            return redirect(url_for('interfaz_login_correcto'))
    return redirect(url_for('interfaz_login_incorrecto'))
@app.route('/signUp', methods=['POST'])
def get_signUp():
    """Register a new admin from the sign-up form.

    BUGFIX: the original returned "registro exitoso" from *inside* the
    duplicate-check loop, which made ``admins.append`` unreachable whenever
    at least one admin already existed (so new admins were never stored).
    The whole list is now checked before the new admin is appended.
    """
    def obtener_usuario(request):
        # build an Admin instance from the posted form fields
        admin = Admin()
        admin.nombres = request.form.get('nombres')
        admin.documento = request.form.get('documento')
        admin.correo = request.form.get('correo')
        # NOTE(review): the password is stored in plain text even though
        # bcrypt is imported -- hash it before persisting.
        admin.password = request.form.get('password')
        return admin

    admin_registrado = obtener_usuario(request)
    # reject the registration if the e-mail is already taken
    for admin in admins:
        if admin_registrado.correo == admin.correo:
            return redirect(url_for('interfaz_ya_existe'))
    admins.append(admin_registrado)
    return redirect(url_for('interfaz_registro_exitoso'))
@app.route('/crear_usuario', methods=['POST'])
def post_crear_usuario():
    """Create a customer (tendero client) from the posted form.

    Cleanup: the leftover debug ``print`` of the posted ``movil`` field was
    removed; behavior is otherwise unchanged.
    """
    def get_usuario(request):
        # build a Usuario instance from the posted form fields
        usuario = Usuario()
        usuario.documento = request.form.get('documento')
        usuario.nombres = request.form.get('nombres').lower()
        usuario.movil = request.form.get('movil')
        # NOTE(review): saldo is kept as the raw form string here; other
        # routes convert it with float() -- confirm the intended type
        usuario.saldo = request.form.get('saldo')
        return usuario

    usuario_registrado = get_usuario(request)
    # refuse to register the same documento twice
    for usuario in usuarios:
        if usuario.documento == usuario_registrado.documento:
            return render_template('aviso-usuario-registrado.html',
                                   usuario=usuario.documento)
    usuarios.append(usuario_registrado)
    return render_template('usuario-registrado.html',
                           usuario=usuario_registrado.documento,
                           nombres=usuario_registrado.nombres)
# #########################
# REUTAS - PUT
# ########################
@app.route('/edit_usuario', methods=['POST'])
def post_edit_usuario():
    """Update a user's data; the new saldo is the posted ``saldo`` plus
    the posted ``editarSaldo`` adjustment.

    BUGFIX: the original fell off the end (returning ``None`` -> HTTP 500)
    when the documento field was missing or matched no user; both cases now
    redirect back to the workspace.  Leftover debug prints were removed.
    """
    documento = request.form.get('documento')
    if documento:
        resultados = tuple(usuario for usuario in usuarios
                           if usuario.documento == documento)
        if len(resultados) > 0:
            usuario = resultados[0]
            usuario.nombres = request.form.get('nombres')
            usuario.movil = request.form.get('movil')
            # NOTE(review): despite being called ``saldo_antiguo`` in the
            # original, the base value comes from the form, not from the
            # stored user -- confirm this is the intended semantics
            base = float(request.form.get('saldo'))
            ajuste = float(request.form.get('editarSaldo'))
            usuario.saldo = base + ajuste
    return redirect(url_for('interfaz_workspace'))
@app.route('/edit_limite_creditos', methods=['POST'])
def limite_saldo():
    """Replace the global credit limit with the value posted as ``saldo``.

    Cleanup: dead initial assignments, debug prints and an unused message
    string were removed; the rendered confirmation page is unchanged.
    """
    saldo_nuevo = int(request.form.get('saldo'))
    # saldo_limite is a single-element list used as mutable global state
    saldo_antiguo = saldo_limite[0]
    saldo_limite[0] = saldo_nuevo
    return render_template('saldo_editado_correctamente.html',
                           saldo_antiguo=saldo_antiguo,
                           saldo_nuevo=saldo_nuevo)
# ########################################
# RUTAS DELETE
# ########################################
@app.route('/eliminar_usuario/<id>')
def interfaz_eliminar_usuario(id):
    """Render the delete-confirmation page for the user matching ``id``.

    ``id`` shadows the builtin, but it is part of the route contract.
    """
    encontrado = next((usuario for usuario in usuarios
                       if usuario.documento == id), None)
    if encontrado is None:
        return {'mensaje': f'No se ha encontrado el usuario con el id {id}'}
    return render_template('eliminar-usuario.html', usuario=encontrado)
@app.route('/eliminar_usuario0', methods=['POST'])
def eliminar_usuario():
    """Delete the user whose documento was posted in the form.

    BUGFIX: the original called ``usuarios.pop(usuario.documento)`` --
    ``list.pop`` takes an integer index, so passing the documento string
    raised ``TypeError`` -- and its ``else`` branch inside the loop reported
    "not found" after inspecting only the first user.  ``list.remove`` is
    used now and the whole list is scanned before giving up.
    """
    documento = request.form.get('documento')
    for usuario in usuarios:
        if usuario.documento == documento:
            usuarios.remove(usuario)
            return {'mensaje': 'Usuario eliminado'}
    return {'mensaje': 'usuario no found'}
'''
'''
# //////////////////////////////////////
# CERAR SESION
@app.route('/sesion_cerrada')
def sesion_cerrada():
    """Confirmation page shown after the session has been closed."""
    plantilla = 'sesion_cerrada_exitosamente.html'
    return render_template(plantilla)
# Start the Flask development server when executed directly (not on import).
if __name__ == "__main__":
    app.run(debug=True, port=5000)
| UTF-8 | Python | false | false | 12,068 | py | 27 | app.py | 5 | 0.636612 | 0.63396 | 0 | 397 | 29.390428 | 164 |
zhouchch3/TS3D_tapeout | 10,883,447,142,785 | b293bdbd7f2b5e689c49a625bf9791953ae81304 | 69a1fe67746690f08410669ffdbc5c3d26daea25 | /zhoucc/scripts/Statistic.py | d337f13923805aabed875c5080804cceeb72cf72 | []
| no_license | https://github.com/zhouchch3/TS3D_tapeout | 581fafb39f7b1249b9ee0b2eea6e777d138ddf04 | a827670fec12dac2993f948806ef039e21af4e1f | refs/heads/master | 2023-06-23T20:20:22.039109 | 2021-07-24T17:22:23 | 2021-07-24T17:22:23 | 389,029,905 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ***********************************************************************
# statistic weight
# ***********************************************************************
import matplotlib.pyplot as plt
import pandas as pd
import heapq
def Statistic( actBlk, PECMAC_Wei,
Len, NumFtrGrp, NumBlk, NumWei, NumChn, NumPEB, NumPEC, cntfrm,cntBlk, cntPEB,cntPEC ):
NumFtr = NumFtrGrp*NumPEB
CntWeiNotZero_FtrGrp = [ 0 for x in range(NumFtr)]
CntWeiNotZero_FtrGrp_Sort = [ 0 for x in range(NumFtr)]
Statistic_CntWeiNotZero = [[[[0 for x in range(NumFtr)]
for y in range(NumWei)]
for z in range(NumPEC)]
for m in range(NumBlk)]
Statistic_CntWeiNotZero_Filter = [[[[0
for y in range(NumWei)]
for z in range(NumPEC)]
for m in range(NumBlk)]
for x in range(NumFtr)]
Statistic_CntPsumNotZero= [[[[0 for x in range(NumFtr)]
for y in range(NumWei)]
for z in range(NumPEC)]
for m in range(NumBlk)]
Statistic_CntPsumNotZero_PEB_Sort= [[[0 for o in range(Len)]
for x in range(NumFtr)]
for m in range(NumBlk)]
Statistic_CntPsumNotZero_PEB= [[[0 for o in range(Len)]
for x in range(NumFtr)]
for m in range(NumBlk)]
PECMAC_Wei_Sort = [[[[[[ 0 for x in range(NumChn)]
for y in range(NumWei)]
for z in range(NumPEC)]
for m in range(NumPEB)]
for n in range(NumBlk)]
for o in range(NumFtrGrp)]
color = [['lightsteelblue','blue','navy',
'springgreen','lime','forestgreen',
'orchid','m','purple'], #block
['salmon','tomato','lightcoral',
'indianred','red','firebrick',
'maroon','darkred','crimson']]
bar_width = 0.3
Theshold = 2
bottom = [0 for x in range(NumFtr)];
# sort PECMAC_Wei
for cntFtrGrp in range(NumFtrGrp):
for cntBlk in range(NumBlk):
for cntPEB in range(NumPEB):
for cntPEC in range(NumPEC):
for cntwei in range(NumWei):
for cntchn in range(NumChn):
if PECMAC_Wei[cntFtrGrp][cntBlk][cntPEB][cntPEC][cntwei][cntchn] !=0:
CntWeiNotZero_FtrGrp[cntPEB + NumPEB*cntFtrGrp] += 1
CntWeiNotZero_FtrGrp_Sort = heapq.nsmallest(NumFtr,CntWeiNotZero_FtrGrp)
print("CntWeiNotZero_FtrGrp_Sort",CntWeiNotZero_FtrGrp_Sort)
CntWeiNotZero_FtrGrp_Sort_index = list(map(CntWeiNotZero_FtrGrp.index, CntWeiNotZero_FtrGrp_Sort))
print("CntWeiNotZero_FtrGrp_Sort_index", CntWeiNotZero_FtrGrp_Sort_index)
for cntFtrGrp in range(NumFtrGrp):
for cntBlk in range(NumBlk):
for cntPEB in range(NumPEB):
cntFtr = cntPEB + NumPEB*cntFtrGrp
PECMAC_Wei_Sort[cntFtrGrp][cntBlk][cntPEB] = \
PECMAC_Wei[CntWeiNotZero_FtrGrp_Sort_index[cntFtr]//NumPEB][cntBlk][CntWeiNotZero_FtrGrp_Sort_index[cntFtr]%NumPEB]
Statistic_CntPsumNotZero_PEB_Ideal = [[[ 0 for x in range(NumPEB)]
for y in range(NumFtrGrp)]
for z in range(NumBlk)]
for cntFtrGrp in range(NumFtrGrp):
for cntPEB in range(NumPEB):
# Statistic_CntFtrNotZero[cntFtrGrp*cntPEB] = 0;
for cntBlk in range(NumBlk):
Statistic_CntClk_Imbalance_PEB = 0
# Statistic_CntBlkNotZero = 0;
for actrow in range(Len):
for actcol in range(Len):
Statistic_CntClk_Imbalance_Wei_Max = 0
for cntPEC in range(NumPEC):
# Statistic_CntPECNotZero = 0;
for cntwei in range(NumWei):
Statistic_CntClk_Imbalance_Wei = 0
for cntchn in range(NumChn):
if PECMAC_Wei_Sort[cntFtrGrp][cntBlk][cntPEB][cntPEC][cntwei][cntchn] !=0 and \
( actBlk[1][1][cntBlk][actrow][actcol][cntchn] > Theshold or \
actBlk[1][1][cntBlk][actrow][actcol][cntchn] < -Theshold):
# Statistic_CntFtrNotZero[cntFtrGrp*cntPEB] += 1;
# Statistic_CntBlkNotZero[cntFtrGrp*cntPEB][cntBlk] += 1;
# Statistic_CntPECNotZero[cntFtrGrp*cntPEB][cntBlk][cntPEC] += 1;
Statistic_CntClk_Imbalance_Wei += 1
Statistic_CntPsumNotZero[cntBlk][cntPEC][cntwei][cntFtrGrp*NumPEB + cntPEB] += 1;
Statistic_CntPsumNotZero_PEB_Sort[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow] += 1;
# Statistic_CntPsumNotZero_PEB_Ideal[cntBlk][cntFtrGrp][cntPEB] += 1
if PECMAC_Wei[cntFtrGrp][cntBlk][cntPEB][cntPEC][cntwei][cntchn] !=0 and \
( actBlk[1][1][cntBlk][actrow][actcol][cntchn] > Theshold or \
actBlk[1][1][cntBlk][actrow][actcol][cntchn] < -Theshold):
Statistic_CntPsumNotZero_PEB[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow] += 1;
if Statistic_CntClk_Imbalance_Wei_Max < Statistic_CntClk_Imbalance_Wei:
Statistic_CntClk_Imbalance_Wei_Max = Statistic_CntClk_Imbalance_Wei
if PECMAC_Wei_Sort[cntFtrGrp][cntBlk][cntPEB][cntPEC][cntwei][cntchn] != 0:
Statistic_CntWeiNotZero[cntBlk][cntPEC][cntwei][cntFtrGrp*NumPEB + cntPEB] += 1;
Statistic_CntWeiNotZero_Filter[cntFtrGrp*NumPEB + cntPEB][cntBlk][cntPEC][cntwei] += 1;
Statistic_CntClk_Imbalance_PEB += Statistic_CntClk_Imbalance_Wei_Max
# if cntBlk == 0:
# plt.bar(x=cntwei+( cntPEB*NumPEC+cntPEC)*NumWei, height =Statistic_CntPsumNotZero[cntBlk][cntPEC][cntwei][cntFtrGrp*NumPEB + cntPEB]
# , bottom =0, label='Blk:'+str(cntBlk)+';PEC:'+str(cntPEC)+';Wei:'+str(cntwei) ,color=color[0][cntwei],width=bar_width);
# else:
# plt.bar(x=cntwei+( cntPEB*NumPEC+cntPEC)*NumWei, height =Statistic_CntPsumNotZero[cntBlk][cntPEC][cntwei][cntFtrGrp*NumPEB + cntPEB]
# , bottom =Statistic_CntPsumNotZero[cntBlk-1][cntPEC][cntwei][cntFtrGrp*NumPEB + cntPEB], label='Blk:'+str(cntBlk)+';PEC:'+str(cntPEC)+';Wei:'+str(cntwei) ,color=color[cntBlk][cntwei],width=bar_width);
plt.bar(x=cntPEB + NumPEB*cntFtrGrp, height =Statistic_CntPsumNotZero_PEB_Sort[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow]
, bottom =bottom[cntPEB + NumPEB*cntFtrGrp],color=color[0][actrow%9],width=bar_width);
bottom[cntPEB + NumPEB*cntFtrGrp] += Statistic_CntPsumNotZero_PEB_Sort[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow]
# plt.bar(x=cntPEB + NumPEB*cntFtrGrp, height =Statistic_CntPsumNotZero_PEB[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow]
# , bottom =bottom[cntPEB + NumPEB*cntFtrGrp],color=color[0][actrow%9],width=bar_width);
# bottom[cntPEB + NumPEB*cntFtrGrp] += Statistic_CntPsumNotZero_PEB[cntBlk][cntFtrGrp*NumPEB + cntPEB][actrow]
# print(cntFtrGrp, cntPEB, cntBlk, Statistic_CntClk_Imbalance_PEB,Statistic_CntPsumNotZero_PEB_Ideal[cntBlk][cntFtrGrp][cntPEB]//(NumPEC*NumWei))
# ******** Psum Bar *****************
# # plt.bar(x=range(NumFtrGrp*NumPEB), height =Statistic_CntFtrNotZero, label='NumberZero of Filter',color='steelblue',width=bar_width)
# # plt.bar(x=range(NumFtrGrp*NumPEB), height =Statistic_CntFtrNotZero, label='NumberZero of Filter',color='steelblue',width=bar_width)
# bottom_psum = [0 for x in range(NumFtr)];
# for cntBlk in range(1):
# for cntPEC in range(1):
# for cntwei in range(NumWei):
# plt.bar(x=np.arange(NumFtr), height =np.array(Statistic_CntWeiNotZero[cntBlk][cntPEC][cntwei])
# , bottom =bottom, label='Blk:'+str(cntBlk)+';PEC:'+str(cntPEC)+';Wei:'+str(cntwei) ,color=color[cntBlk][cntwei],width=bar_width);
# bottom = np.array(bottom) + np.array(Statistic_CntWeiNotZero[cntBlk][cntPEC][cntwei])
# plt.bar(x=np.arange(NumFtr)+bar_width, height =np.array(Statistic_CntPsumNotZero[cntBlk][cntPEC][cntwei])/100
# , bottom =bottom_psum, label='Blk:'+str(cntBlk)+';PEC:'+str(cntPEC)+';Wei:'+str(cntwei) ,color=color[cntBlk][cntwei],width=bar_width);
# bottom_psum = np.array(bottom_psum) + np.array(Statistic_CntPsumNotZero[cntBlk][cntPEC][cntwei])/100
# plt.xticks(np.arange(0,NumFtr,5),np.arange(0,NumFtr,5), rotation=60, fontsize=10)
# plt.savefig('Statistic_psum_PEB_16row_2blk_patch1.jpg')
plt.savefig('Statistic_psum_PEB_Sort.jpg')
print('*'*8 +'Finish Statistic'+'*'*8)
# End Statistic
# *****************************************************
# *****************************
# 3D figure
# fig = plt.figure()
# plt_3d = fig.add_subplot(111,projection='3d')
# color_array = ['r','g','b','k']
# for x in range(Len):
# for y in range(Len):
# for z in range(NumChn): # Only analysis one Block of delta frame
# if actBlk[0][1][0][x][y][z] > 5 or actBlk[0][1][0][x][y][z] < -5:
# plt_3d.scatter(x,y,z,c=color_array[z%4],alpha=0.4, marker='s')#c ,marker, totation,
# plt_3d.view_init(elev=0, azim = 0)
# plt.savefig("Visual_IFM_Block_3D.jpg",dpi=8000)
# 2D figure
# fig = plt.figure()
# for z in range(NumChn): # Only analysis one Block of delta frame
# plt_2d = fig.add_subplot(4,8,z+1)
# for x in range(Len):
# for y in range(Len):
# if actBlk[0][1][0][x][y][z] > 5 or actBlk[0][1][0][x][y][z] < -5:
# plt_2d.scatter(x,y, c='b',marker='s')#c ,marker, totation,
# # plt.savefig("Visual_IFM_channel_2D_"+str(z)+".jpg",dpi=300)
# plt.savefig("Visual_IFM_channel_2D.jpg",dpi=800)
# print('*'*8 +'Finish Visual'+'*'*8)
# # *********************************************************
# *************************************************************
# Get Addition Config for every Wei of 1 PEB
# CfgWei[PEC][Wei][config:help:1/helped:0,begin Addr,help_whichPEC,help_whichWei
NumHelp = 9
Theshold_Help = 2
Largest9cnt = [0 for x in range(NumHelp)]
Smallest9cnt = [0 for x in range(NumHelp)]
CfgWei = [[[ 0 for x in range(4)]
for y in range(NumWei)]
for z in range(NumPEC)]
Largest9Index = [ 0 for z in range(NumHelp)]
Smallest9Index = [0 for z in range(NumHelp)]
Statistic_CntWeiNotZero_CurFilter = [ 0 for x in range(NumWei*NumPEC)]
print(NumPEC, NumWei)
for cntPEC in range(NumPEC):# to 1 D array
for cntwei in range(NumWei):
Statistic_CntWeiNotZero_CurFilter[cntwei+cntPEC*NumWei] = Statistic_CntWeiNotZero_Filter[0][0][cntPEC][cntwei]
print(Statistic_CntWeiNotZero_CurFilter)
Largest9cnt = heapq.nlargest(9,Statistic_CntWeiNotZero_CurFilter)
Smallest9cnt= heapq.nsmallest(9,Statistic_CntWeiNotZero_CurFilter)
print("Statistic_CntWeiNotZero_Filter[0][0]")
print(Statistic_CntWeiNotZero_Filter[0][0])
print("Statistic_CntWeiNotZero_Filter[1][0]")
print(Statistic_CntWeiNotZero_Filter[1][0])
print("Statistic_CntWeiNotZero_Filter[2][0]")
print(Statistic_CntWeiNotZero_Filter[2][0])
print("Largest9cnt")
print(Largest9cnt)
print("Smallest9cnt")
print(Smallest9cnt)
Largest9Index = list(map(Statistic_CntWeiNotZero_CurFilter.index, Largest9cnt))
Smallest9Index = list(map(Statistic_CntWeiNotZero_CurFilter.index, Smallest9cnt))
print(Largest9Index)
print(Smallest9Index)
for cnthelp in range(NumHelp):
if Largest9cnt[cnthelp] - Smallest9cnt[cnthelp] >= Theshold_Help:
[L_PEC,L_Wei]= [Largest9Index[cnthelp]//NumWei, Largest9Index[cnthelp]%NumWei]
CountHelp = (Largest9cnt[cnthelp] - Smallest9cnt[cnthelp])//2
countNotZero = 0
for cntchn in range(NumChn):
if countNotZero == CountHelp:
AddrHelp = cntchn
if PECMAC_Wei[0][0][0][L_PEC][L_Wei][cntchn] != 0:
countNotZero += 1
print("L_PEC L_Wei")
print(L_PEC,L_Wei)
CfgWei[L_PEC][L_Wei] = [0,AddrHelp,0,0] # Error: is Addr not count
[S_PEC,S_Wei]= [Smallest9Index[cnthelp]//NumWei, Smallest9Index[cnthelp]%NumWei]
CfgWei[S_PEC][S_Wei] = [1,AddrHelp,L_PEC,L_Wei]
print("Help config:",CfgWei)
TotalClk = 0
TotalClk_worst = 0
Theshold = 5
CurCntPsum_best = 0
for actrow in range(Len):
for actcol in range(Len):
MaxCntPsum = 0
MaxCntPsum_worst = 0
for cntPEC in range(NumPEC):
for cntwei in range(NumWei):
CurCntPsum = 0
CurCntPsum_worst = 0
if CfgWei[cntPEC][cntwei][0] == 0:
for cntchn in range(CfgWei[cntPEC][cntwei][1]+10,NumChn):
if PECMAC_Wei[0][0][0][cntPEC][cntwei][cntchn] !=0 and \
( actBlk[0][1][0][actrow][actcol][cntchn] > Theshold or actBlk[0][1][0][actrow][actcol][cntchn] < -Theshold):
CurCntPsum += 1
# elif CfgWei[cntPEC][cntwei][0] == 1:
# for cntchn in range(NumChn):# self
# if PECMAC_Wei[0][0][0][cntPEC][cntwei][cntchn] != 0 and \
# ( actBlk[0][1][0][actrow][actcol][cntchn] > Theshold or actBlk[0][1][0][actrow][actcol][cntchn] < -Theshold):
# CurCntPsum += 1
# for cntchn in range(CfgWei[cntPEC][cntwei][1]):#help
# if PECMAC_Wei[0][0][0][CfgWei[cntPEC][cntwei][2]][CfgWei[cntPEC][cntwei][3]][cntchn] != 0 and \
# ( actBlk[0][1][0][actrow][actcol][cntchn] > Theshold or actBlk[0][1][0][actrow][actcol][cntchn] < -Theshold):
# CurCntPsum += 1
# else:
# print('<'*8+' Error Help/Helped '+'>'*8)
if CurCntPsum > MaxCntPsum :
MaxCntPsum = CurCntPsum
# ****************************************
for cntchn in range(NumChn):
if PECMAC_Wei[0][0][0][cntPEC][cntwei][cntchn] !=0 and \
( actBlk[0][1][0][actrow][actcol][cntchn] > Theshold or actBlk[0][1][0][actrow][actcol][cntchn] < -Theshold):
CurCntPsum_worst += 1
CurCntPsum_best += 1
if CurCntPsum_worst > MaxCntPsum_worst :
MaxCntPsum_worst = CurCntPsum_worst
TotalClk += MaxCntPsum
TotalClk_worst += MaxCntPsum_worst
print("TotalClk: "+str(TotalClk))
print("TotalClk_worst: "+str(TotalClk_worst))
print("TotalClk_best :", CurCntPsum_best/(NumPEC*NumWei))
print("Short percentage :"+str(float(TotalClk_worst - TotalClk)/TotalClk_worst*100)+ "%")
return
| UTF-8 | Python | false | false | 16,340 | py | 375 | Statistic.py | 14 | 0.525949 | 0.510343 | 0 | 277 | 57.98917 | 243 |
a376714574/ceshi2 | 3,195,455,699,928 | 8088c9329d4870cb1e72807a746ebf910c5c35bd | 911d3e430fb980d76804a6372ffb8eee917137e2 | /old/new/Requests_Processing.py | d22b5fea5550de638978f90c9a27d440dfe00732 | []
| no_license | https://github.com/a376714574/ceshi2 | a11526d9c1dea0fa33baf0677ff046b35a850b27 | 50130cae01f673522ede3ba43cefff6ff7c1ac0a | refs/heads/master | 2022-12-08T14:50:09.492291 | 2019-05-13T15:31:05 | 2019-05-13T15:31:05 | 186,026,781 | 0 | 0 | null | false | 2022-12-08T01:45:25 | 2019-05-10T17:19:30 | 2019-05-13T15:31:16 | 2022-12-08T01:45:25 | 174 | 0 | 0 | 3 | Python | false | false | # from new_123.excel_rz import excel
# from new_123 import os_path
# from new_123.requests_GP import get_post
# from ddt import ddt,data
# from new_123.mysql_r import mysql
# import unittest
#
#
# @ddt
# class Requests_Cls(unittest.TestCase):
#
# ex_rz=excel(excel_name=os_path.live_path,sheet="register")
# ex_r=ex_rz.excel_read()
# gp = get_post()
# mysql=mysql()
#
# @classmethod
# def setUpClass(cls): #类方法不需要有实例对象直接类名。就可以引用了 cls代表的
# cls.gp.sess_open() #Requests_Cls这个类
# cls.mysql.connect()
#
# @data(*ex_r) #五个测试用例ddt会生成五个test方法 而每次他都会从
# def test_requests(self,ex_r): #ddt开始运行所以up和down就变得非常关键self就代表实例本身
# print(ex_r.data)
# if ex_r.data.find("random")!=-1:
# new=self.mysql.sql_action("select max(mobilephone) from future.member")
# print(new[0])
# new_1=int(new[0])+1
# ex_r.data=ex_r.data.replace("random",str(new_1))
# print(ex_r.data)
# req=self.gp.req_gp(ex_r.mode,ex_r.url,ex_r.data)
#
# try:
# self.assertIn(str(ex_r.Exq),req)
# self.ex_rz.excel_write(ex_r.id,req,"True")
# except Exception as e:
# self.ex_rz.excel_write(ex_r.id,req,"False")
#
# @classmethod
# def tearDownClass(cls):
# cls.gp.sess_close()
# cls.mysql.mysql_close()
# Ad-hoc scratch check left at module level: JSON-ish payload string.
data='{"mobilephone":"15810447878","amount":"100"}'
print("amount" in data) | UTF-8 | Python | false | false | 1,640 | py | 41 | Requests_Processing.py | 36 | 0.569241 | 0.547936 | 0 | 46 | 31.673913 | 84 |
sparsh-ai/reco-tut-ocr | 1,537,598,338,089 | 79d0a7021655b072d02eadf5839b49bc32ab4f24 | b587ced7d80ab4c6cb86234882c79adc96d3c0d3 | /recommenders/utils/tools.py | a4ec6465f53e9b60a6c8ffc1f64273fcfbc10679 | []
| no_license | https://github.com/sparsh-ai/reco-tut-ocr | 7969e27d100070838154e1a7193cc6eb95a47cd5 | 979971b95462a2bfe3426cabebb5a007166af709 | refs/heads/main | 2023-07-01T20:34:13.485256 | 2021-08-05T19:16:17 | 2021-08-05T19:16:17 | 383,611,031 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Helper functions for preparing example dataset
import functools
import pandas as pd
from time import time
from database.manager import utils as db_main
from .logger import get_logger
logger = get_logger(__name__)
def load_data(env: str) -> dict:
    """Load Users and Content Data from SQLite.

    Parameters
    ----------
    env: str
        Environment for which database credentials to inherit
    """
    # Map each output key to the table backing it, then fetch all four.
    tables = {
        "course": "user_course_views",
        "assessment": "user_assessment_scores",
        "interest": "user_interests",
        "tags": "course_tags",
    }
    return {
        key: db_main.read_table(env, f"select * from {table}")
        for key, table in tables.items()
    }
def data_summary(data: dict):
    """Log summary metrics (shape, per-column uniques and nulls).

    Parameters
    ----------
    data: dict
        Input dictionary containing dataframes for course,
        assessment, interest, and tags, respectively.
    """
    for name, df in data.items():
        logger.info(f"\nDataframe: {name.upper()} -- Shape: {df.shape}")
        for c in df.columns:
            # nunique(dropna=False) matches len(unique()) -- NaN counts as a
            # value -- and isnull().sum() avoids building a filtered frame
            # just to read its row count.
            unique = df[c].nunique(dropna=False)
            is_null = int(df[c].isnull().sum())
            logger.info(f"{c} -- Unique: {unique} -- Null: {is_null}")
    return
def preprocess(data: dict) -> dict:
    """Apply a series of preprocessing steps such as
    renaming columns and encoding categorical variables
    for each dataframe.

    Parameters
    ----------
    data: dict
        Input dictionary containing dataframes for course,
        assessment, interest, and tags, respectively.

    Returns
    -------
    dict
        The prepped dataframes plus two derived keys: ``max_users``
        (largest user handle observed) and ``course_tags`` (course
        views merged with course tags).
    """
    prep = {}
    for name, df in data.items():
        # drop null values
        df.dropna(axis=1, how="all", inplace=True) # course tags table
        df.reset_index(drop=True, inplace=True)
        # rename columns in dataframe
        rename = {
            "interest_tag": "tag",
            "assessment_tag": "tag",
            "course_tags": "tag",
            "user_assessment_score": "score",
            "view_time_seconds": "view",
        }
        df.columns = [rename[i] if i in rename.keys() else i for i in df.columns]
        # discretize user assessment scores quantile buckets
        # NOTE(review): qcut needs enough distinct values for the requested
        # bins; heavy ties can raise -- confirm upstream data.
        if any("score" in col for col in df.columns):
            df["score"] = pd.qcut(df["score"], q=3, labels=["high", "medium", "low"])
        # discretize user viewing time into quantile buckets
        if any("view" in col for col in df.columns):
            df["view"] = pd.qcut(
                df["view"], q=4, labels=["high", "medium", "low", "very low"]
            )
        # save prep dataframe
        prep[name] = df
    # add key for max users -> used for initializing user-item matrix
    # NOTE(review): assumes user_handle values are orderable (numeric ids) --
    # confirm against the schema.
    prep["max_users"] = max(
        [max(v["user_handle"]) for k, v in prep.items() if "user_handle" in v.columns]
    )
    # add key containing dataframe for merged course/tags
    prep["course_tags"] = pd.merge(
        prep["course"], prep["tags"], on="course_id", how="left"
    )
    return prep
def timer(func):
    """Decorator that prints a function's execution time.

    Format = H:M:S

    The wrapped function's return value is passed through unchanged
    (the original implementation silently discarded it, which broke
    any decorated function that returned something).
    """
    @functools.wraps(func)
    def wrapper_time(*args, **kwargs):
        start = time()
        result = func(*args, **kwargs)  # keep the callee's result
        elapsed_time = time() - start
        h, r = divmod(elapsed_time, 3600)
        m, s = divmod(r, 60)
        print(f"Elapsed Time: {h:.0f}H:{m:.0f}M:{s:.0f}s")
        return result
    return wrapper_time
| UTF-8 | Python | false | false | 3,562 | py | 32 | tools.py | 15 | 0.58338 | 0.57973 | 0 | 118 | 29.186441 | 86 |
JeanPool22/platzi | 9,062,381,004,393 | a4c22038ebe88cde3ca76526eeb98e39fe06e6fc | 4f8d2517143c38241f00bdc56bcc736fc0e94ac7 | /basicoPython/estructuda_de_datos/diccionarios.py | af1995abe0e3861cdc78224ae3e188e1973d8f19 | []
| no_license | https://github.com/JeanPool22/platzi | 25efd34f8f3d5e53bac00798cfe88384c656b69c | 339e750e2aedd831eb61ea70c2d583cb4e414a3b | refs/heads/main | 2023-06-20T20:49:14.124261 | 2021-07-25T01:38:40 | 2021-07-25T01:38:40 | 389,232,169 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def run():
mi_diccionario = {
'llave1': 1,
'llave2': 2,
'llave3': 3
}
# print(mi_diccionario['llave1'])
# print(mi_diccionario['llave2'])
# print(mi_diccionario['llave3'])
poblacion_pais = {
'Argentina': 4493872,
'Brazil': 210147125,
'Colombia': 50372424
}
# print(poblacion_pais['Bolivia'])
# for pais in poblacion_pais.keys():
# print(pais)
# for pais in poblacion_pais.values():
# print(pais)
for pais, poblacion in poblacion_pais.items():
print(pais + ' tiene ' + str(poblacion) + ' habitantes')
if __name__ == "__main__":
run() | UTF-8 | Python | false | false | 657 | py | 47 | diccionarios.py | 18 | 0.538813 | 0.488584 | 0 | 28 | 22.5 | 64 |
Egor-oop/geekshop1 | 5,497,558,155,390 | 00310c7722cf4c83c190b4c5b54f2f37b5698c47 | 58accaadb8d3ae7b8b82085dec35539c487d5090 | /products/views.py | af42e2f70d3d90e5da101e23bdec296073cffd81 | []
| no_license | https://github.com/Egor-oop/geekshop1 | 02b9147d8f442ae87009a3f433d267b5d367ccb1 | 1ce2dc3efdd566718594d4c4a22964f674ada900 | refs/heads/master | 2023-08-25T10:39:32.339421 | 2021-10-17T11:21:15 | 2021-10-17T11:21:15 | 396,658,624 | 2 | 0 | null | false | 2021-10-17T11:21:17 | 2021-08-16T06:45:17 | 2021-10-07T07:30:53 | 2021-10-17T11:21:15 | 10,001 | 1 | 0 | 0 | CSS | false | false | from django.shortcuts import render
from products.models import ProductsCategory, Product
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def index(request):
    """Render the Geekshop landing page."""
    context = {'title': 'Geekshop'}
    return render(request, 'products/index.html', context)
def products(request, category_id=None, page=1):
    """Render the catalog, optionally filtered by category, 3 items per page."""
    if category_id:
        queryset = Product.objects.filter(category_id=category_id)
    else:
        queryset = Product.objects.all()
    paginator = Paginator(queryset, per_page=3)
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    context = {
        'title': 'GeekShop - Каталог',
        'categories': ProductsCategory.objects.all(),
        'products': page_obj,
    }
    return render(request, 'products/products.html', context)
| UTF-8 | Python | false | false | 929 | py | 25 | views.py | 15 | 0.720174 | 0.71692 | 0 | 26 | 34.461538 | 104 |
Minhan93/Expected_Risk_AUC | 60,129,570,087 | ba16bfed39d56b67372f470147b5e2df100f872a | 3484cd79e488d2e0b651dd14f236adab0b15d02e | /AUC_code/defObj.py | 2ecf50a9365ab5bf7f6647c750bba5f2704f995f | []
| no_license | https://github.com/Minhan93/Expected_Risk_AUC | 7bea51c7903ab9dd81e5802b99cab3fc75f95dd2 | 36fffef22f9f3f12ece49338f12cb5a5fcee7c89 | refs/heads/master | 2020-04-08T10:31:20.963831 | 2018-11-27T03:56:21 | 2018-11-27T03:56:21 | 159,272,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from numpy import linalg as la
from scipy.stats import norm
import timeit
class defObj:
    """Objective wrapper for AUC-style optimization.

    Caches the training data and solver parameters, and evaluates the
    objective (``evalf``) and its gradient (``evaldf``) at a point.  Two
    modes are selected by ``param.method``: ``'hinge'`` (an empirical
    pairwise surrogate over positive/negative sample pairs) and the
    ``'cdf'`` path (normal CDF of the score-difference statistic plus a
    small norm-one penalty).

    NOTE(review): every method names its first parameter ``obj`` in place
    of the conventional ``self``.
    """
    def __init__(obj, param, data):
        """Store parameters and data, then initialize x, f(x) and grad f(x)."""
        obj.max_iter = param.max_iter
        obj.gtol = param.opt_tol
        obj.inctol = param.opt_inc
        obj.p = param.dim
        obj.method = param.method
        obj.initw = data.initw
        obj.X_p = data.X_train_p
        obj.X_n = data.X_train_n
        obj.N_p = data.N_train_p
        obj.N_n = data.N_train_n
        obj.algorithm = param.algorithm
        obj.name_list = param.name_list
        obj.name_data = param.name_data
        # Gaussian moments are only read (and presumably only present) in cdf mode.
        if (param.method == 'cdf'):
            obj.mean_p = data.mean_p
            obj.mean_n = data.mean_n
            obj.cov_pp = data.cov_pp
            obj.cov_nn = data.cov_nn
            obj.mean_hat = obj.mean_n - obj.mean_p
            obj.cov_hat = obj.cov_pp + obj.cov_nn
            # obj.mean_z = 0
            # obj.cov_z = 0
        obj.H = np.identity(obj.p + 1)
        obj.iters = 0
        obj.iter_back = 0
        obj.mu = param.mu_init
        obj.flag_opt = False
        obj.message_opt = ' '
        obj.initx(obj.initw)
        obj.initf()
        obj.initdf()
    def compute_moments_z(obj, xnew):
        """Cache mean_z, cov_mean_z and cov_z of the score statistic at xnew.

        The two branches differ only in operand orientation; datasets in
        name_list appear to store moments as matrices -- TODO confirm.
        """
        if obj.name_data not in obj.name_list:
            obj.mean_z = np.dot(xnew, obj.mean_hat) # w mu_hat
            obj.cov_mean_z = np.dot(xnew, obj.cov_hat) # w cov_hat
            obj.cov_z = np.dot(obj.cov_mean_z, xnew) # w cov_hat w
        else:
            obj.mean_z = obj.mean_hat.dot(xnew.transpose()) # w cov_pp # w cov_pp w
            obj.cov_mean_z = obj.cov_hat.dot(xnew.transpose()) # w cov_nn
            obj.cov_z = obj.cov_mean_z.transpose().dot(xnew.transpose()) # w cov_nn w
    # compute function value
    def evalf(obj, xnew):
        """Return the objective value at xnew (hinge or cdf mode)."""
        if obj.method == 'hinge':
            start_time_f = timeit.default_timer()
            out_p = np.dot(obj.X_p, xnew) # N_p-by-1 vector
            out_n = np.dot(obj.X_n, xnew) # N_n-by-1 vector
            loss = 0
            sum_p = 0
            num_neg = 0
            xpre = 0
            # Negative scores carry the hinge margin of 1.
            out_n += 1
            out_all = np.hstack((out_p, out_n)) # check size
            sort_all = np.sort(out_all)
            idx_all = np.argsort(out_all)
            N_all = obj.N_n + obj.N_p
            # Single descending sweep over all scores: for each positive,
            # accumulate violations against the negatives already seen.
            # NOTE(review): confirm against the pairwise-loss definition.
            for i in range(N_all):
                if idx_all[N_all - 1 - i] > obj.N_p - 1: # meaning this is from negtive
                    sum_p += num_neg * (xpre - sort_all[N_all - 1 - i])
                    xpre = sort_all[N_all - 1 - i]
                    num_neg += 1
                else:
                    sum_p += num_neg * (xpre - sort_all[N_all - 1 - i])
                    loss += sum_p
                    xpre = sort_all[N_all - 1 - i]
            fval = (float(loss) / (obj.N_p * obj.N_n))
            end_time_f = timeit.default_timer()
            soltime_time_f = end_time_f - start_time_f
            # print 'fval is: {} and time for computing fval is: {}'.format(fval, soltime_time_f)
        else:
            obj.compute_moments_z(xnew)
            frac = float(obj.mean_z) / np.sqrt(obj.cov_z)
            # Negative normal CDF plus a penalty holding ||w|| near 1.
            fval = - norm.cdf(frac) + 0.001 * (1 - (np.linalg.norm(xnew))**2)**2
        return fval
    def evaldf(obj, xnew):
        """Return the gradient at xnew; also caches obj.df and obj.normdf."""
        if obj.method == 'hinge':
            start_time_df = timeit.default_timer()
            obj.df = np.zeros(obj.p + 1)
            out_p = np.dot(obj.X_p, xnew) # N_p-by-1 vector
            out_n = np.dot(obj.X_n, xnew) # N_n-by-1 vector
            loss = 0
            sum_p = 0
            num_neg = 0
            dfpre = np.zeros(obj.p + 1)
            out_n += 1
            out_all = np.hstack((out_p, out_n)) # check size
            #sort_all = np.sort(out_all)
            idx_all = np.argsort(out_all)
            N_all = obj.N_n + obj.N_p
            # Mirrors the evalf sweep, accumulating feature-vector
            # differences instead of score differences.
            for i in range(N_all):
                curr_idx = idx_all[N_all - 1 - i]
                if curr_idx > obj.N_p - 1: # meaning this is from negtive
                    sum_p += num_neg * (dfpre - obj.X_n[curr_idx - obj.N_p])
                    dfpre = obj.X_n[curr_idx - obj.N_p]
                    num_neg += 1
                else:
                    sum_p += num_neg * (dfpre - obj.X_p[curr_idx])
                    obj.df += sum_p
                    dfpre = obj.X_p[curr_idx]
            obj.df = obj.df / (obj.N_p * obj.N_n)
            end_time_df = timeit.default_timer()
            soltime_time_df = end_time_df - start_time_df
            obj.normdf = np.linalg.norm(obj.df)
        else:
            obj.compute_moments_z(xnew)
            frac_1 = float(obj.mean_z) / np.sqrt(obj.cov_z)
            numinator = np.sqrt(obj.cov_z) * obj.mean_hat - frac_1 * obj.cov_mean_z
            frac_2 = float(1) / obj.cov_z
            coeff = float(1) / np.sqrt(2 * np.pi)
            # Quotient-rule gradient of norm.cdf(mean_z / sqrt(cov_z)).
            obj.df = coeff * np.exp(-0.5 * (frac_1**2)) * frac_2 * numinator
            obj.df *= -1
            obj.df -= 0.001 * 4 * (1 - (np.linalg.norm(xnew))**2) * xnew
            if obj.name_data in obj.name_list:
                obj.df = np.array(obj.df).reshape(-1,)
            obj.normdf = np.linalg.norm(obj.df)
        return obj.df
    def initx(obj, x0):
        """Set the iterate to the stored initial point (x0 is ignored)."""
        obj.x = obj.initw
        obj.x_prev = obj.x
    def initf(obj):
        """Evaluate and cache f at the current iterate."""
        obj.fval = obj.evalf(obj.x)
        obj.fval_prev = obj.fval
    def initdf(obj):
        """Evaluate and cache the gradient at the current iterate."""
        obj.evaldf(obj.x)
        obj.df_prev = obj.df
| UTF-8 | Python | false | false | 5,487 | py | 14 | defObj.py | 14 | 0.468015 | 0.457445 | 0 | 169 | 31.467456 | 102 |
jabbalaci/teaching-assets | 9,646,496,587,019 | a23db7300af5e21225346a841afa4405d3bd505a | 9ffd4beca0dc486cc94073cbb980a4407bcd379a | /eng/nosql/07-crud_with_pymongo/080_insert_many.py | 753b73fc99f9e3ccbf66ce33351f0e7003ccd849 | []
| no_license | https://github.com/jabbalaci/teaching-assets | bf0c0feb2ae1dd67b12766f63e50381866257deb | 59b6c4bf52e93039afb2da47c5f9ed5c931e5077 | refs/heads/master | 2023-08-05T06:43:39.830595 | 2023-07-23T18:11:27 | 2023-07-23T18:11:27 | 69,950,635 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import pymongo
# establish a connection to the database
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.school
coll = db.people
# Start from an empty collection so each run of the demo is repeatable.
coll.drop()
def insert_many():
    """Bulk-insert demo: show the document count before and after
    inserting a batch with insert_many().

    NOTE(review): with the duplicate-containing list commented back in,
    the default (ordered) insert stops at the first duplicate _id; the
    commented ordered=False variant would attempt every document.
    """
    print("insert_many()")
    print()
    # coll.count() is deprecated in the newest driver :(
    print("before: {}".format(coll.estimated_document_count()))
    alice = {"_id": "alice", "name": "Alice", "company": "MongoDB",
             "interests": ['horses', 'skydiving', 'fencing']}
    bob = {"_id": "bob", "name": "Bob", "company": "MongoDB",
           "interests": ['running', 'cycling', 'photography']}
    # cecile is only used by the commented-out duplicate-batch experiment.
    cecile = {"_id": "cecile", "name": "Cecile", "company": "MongoDB",
              "interests": ['swimming', 'jogging']}
    people = [alice, bob]
    # people = [alice, bob, alice, cecile]
    try:
        coll.insert_many(people)
        # coll.insert_many(people, ordered=False)
    except Exception as e:
        print("Unexpected error:", type(e), e)
    print("after: {}".format(coll.estimated_document_count()))
#############################################################################
if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    insert_many()
| UTF-8 | Python | false | false | 1,160 | py | 291 | 080_insert_many.py | 105 | 0.559483 | 0.558621 | 0 | 42 | 26.619048 | 77 |
BQSKit/bqskit | 14,620,068,692,741 | e877e90a7933e98b2a42deb213c4809ae7778dce | fcc955fd5b3fc997f5b1651c5c8b9032a6b9b177 | /tests/ir/circuit/test_point_methods.py | e5f69b5e14af4e5b019e68dea56d0553349da677 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
]
| permissive | https://github.com/BQSKit/bqskit | cf393d75b26349f7258e9caf9d5c8fa37d0c8de6 | c89112d15072e8ffffb68cf1757b184e2aeb3dc8 | refs/heads/main | 2023-09-01T04:11:18.212722 | 2023-08-29T17:34:38 | 2023-08-29T17:34:38 | 331,370,483 | 54 | 18 | NOASSERTION | false | 2023-09-14T14:33:26 | 2021-01-20T16:49:36 | 2023-09-11T15:18:08 | 2023-09-14T14:33:26 | 2,920 | 57 | 16 | 11 | OpenQASM | false | false | """This test module tests circuit point methods."""
from __future__ import annotations
import pytest
from hypothesis import given
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import HGate
from bqskit.ir.point import CircuitPoint
from bqskit.ir.point import CircuitPointLike
from bqskit.utils.test.strategies import circuit_point_likes
from bqskit.utils.test.strategies import circuits
from bqskit.utils.test.types import invalid_type_test
from bqskit.utils.test.types import valid_type_test
from bqskit.utils.typing import is_bool
class TestIsPointInRange:
    """Tests for Circuit.is_point_in_range."""
    @valid_type_test(Circuit(1).is_point_in_range)
    def test_valid_type(self) -> None:
        pass
    @invalid_type_test(Circuit(1).is_point_in_range)
    def test_invalid_type(self) -> None:
        pass
    @given(circuit_point_likes())
    def test_return_type(self, point: CircuitPointLike) -> None:
        circuit = Circuit(1)
        assert is_bool(circuit.is_point_in_range(point))
    # The fixtures below build a 5-qudit circuit with 5 cycles of H gates,
    # i.e. a 5x5 (cycle, qudit) region.
    # Negative indices down to -5 wrap Python-style into the region.
    @pytest.mark.parametrize(
        'point', [
            (-5, -5),
            (-4, -4),
            (-3, -3),
            (-2, -2),
            (-1, -1),
        ],
    )
    def test_true_neg(self, point: CircuitPointLike) -> None:
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert circuit.is_point_in_range(point)
    # Non-negative points on the main diagonal are all inside the region.
    @pytest.mark.parametrize(
        'point', [
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
        ],
    )
    def test_true_pos(self, point: CircuitPointLike) -> None:
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert circuit.is_point_in_range(point)
    # Negative indices below -5 fall outside the wrap-around range.
    @pytest.mark.parametrize(
        'point', [
            (-1000, 0),
            (1, -100),
            (-8, -8),
            (-6, -6),
            (-7, 4),
        ],
    )
    def test_false_neg(self, point: CircuitPointLike) -> None:
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert not circuit.is_point_in_range(point)
    # Either coordinate at 5 or above is out of the 5x5 region.
    @pytest.mark.parametrize(
        'point', [
            (1000, 0),
            (1, 100),
            (8, 8),
            (6, 6),
            (5, 4),
            (3, 8),
            (2, 9),
            (8, 2),
        ],
    )
    def test_false_pos(self, point: CircuitPointLike) -> None:
        circuit = Circuit(5)
        for i in range(5):
            circuit.append_gate(HGate(), [0])
            circuit.append_gate(HGate(), [1])
            circuit.append_gate(HGate(), [2])
            circuit.append_gate(HGate(), [3])
            circuit.append_gate(HGate(), [4])
        assert not circuit.is_point_in_range(point)
class TestIsPointIdle:
    """Tests for Circuit.is_point_idle."""
    @valid_type_test(Circuit(1).is_point_idle)
    def test_valid_type(self) -> None:
        pass
    @invalid_type_test(Circuit(1).is_point_idle)
    def test_invalid_type(self) -> None:
        pass
    @given(circuits())
    def test_return_type(self, circuit: Circuit) -> None:
        for cycle in range(circuit.num_cycles):
            for qudit in range(circuit.num_qudits):
                assert is_bool(circuit.is_point_idle((cycle, qudit)))
    @given(circuits())
    def test_not_idle(self, circuit: Circuit) -> None:
        # Every point occupied by an operation is non-idle ...
        points = set()
        for cycle, op in circuit.operations_with_cycles():
            for qudit in op.location:
                assert not circuit.is_point_idle((cycle, qudit))
                points.add((cycle, qudit))
        # ... and every remaining point in the region is idle.
        for cycle in range(circuit.num_cycles):
            for qudit in range(circuit.num_qudits):
                if (cycle, qudit) not in points:
                    assert circuit.is_point_idle((cycle, qudit))
class TestNormalizePoint:
    """Tests for Circuit.normalize_point."""
    @valid_type_test(Circuit(1).normalize_point)
    def test_valid_type(self) -> None:
        pass
    @invalid_type_test(Circuit(1).normalize_point)
    def test_invalid_type(self) -> None:
        pass
    @given(circuits())
    def test_normalize(self, circuit: Circuit) -> None:
        # Sweep every in-range point, including negative (wrapping) indices.
        for cycle in range(-circuit.num_cycles, circuit.num_cycles):
            for qudit in range(-circuit.num_qudits, circuit.num_qudits):
                point = (cycle, qudit)
                norm_point = circuit.normalize_point(point)
                assert isinstance(norm_point, CircuitPoint)
                # Normalization must address the same cell ...
                cell1 = circuit._circuit[point[0]][point[1]]
                cell2 = circuit._circuit[norm_point[0]][norm_point[1]]
                assert cell1 == cell2
                # ... with both coordinates made non-negative and in range.
                assert 0 <= norm_point.qudit < circuit.num_qudits
                assert 0 <= norm_point.cycle < circuit.num_cycles
| UTF-8 | Python | false | false | 5,241 | py | 419 | test_point_methods.py | 393 | 0.552757 | 0.533486 | 0 | 161 | 31.552795 | 72 |
bwopez/restaurant_scheduler | 12,876,311,982,967 | 5cf4fb75325765cf37ef6eec3a915fe9a96040cb | 8cc27099d9a5ebf481a56470778435ee33e9b4a3 | /Table.py | e087dd8acaa06680f93221775554ed3ce320f880 | []
| no_license | https://github.com/bwopez/restaurant_scheduler | 5cf8f9ad473af4cb7deea5819852312d319fdfcb | a057eb07263926fc735dafd6441e8d2009228dff | refs/heads/master | 2021-10-21T19:01:51.932396 | 2019-03-05T22:22:51 | 2019-03-05T22:22:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import Person
import menu
class Table:
    """A restaurant table that seats a fixed party and tracks each seat's order."""

    def __init__(self, _table_num, _chairs=3, _name="walk in"):
        """
        Build a table with one Person per chair.
        :param _table_num: Number identifying this table
        :param _chairs: The amount of chairs set at the table
        :param _name: The optional name for a potential reservation or walk in
        """
        self.table_num = _table_num
        self.chairs = _chairs
        # One customer record per chair; all start under the same name.
        self.customers = [Person.Person(_name) for _ in range(self.chairs)]
        # The first seat holds the reservation for the whole table.
        self.reservation_holder = self.customers[0]

    def get_chairs(self):
        """
        :return: The amount of current chairs
        """
        return self.chairs

    def set_reservation_name(self, _name):
        """
        Rename the reservation.
        :param _name: The new name that the reservation should be set to
        :return: No return value
        """
        self.reservation_holder.set_name(_name)

    def get_reservation_name(self):
        """
        :return: The current reservation name
        """
        return self.reservation_holder.get_name()

    def add_drink_order(self, chair_num, _drink):
        """
        Record a drink for the customer seated at chair_num.
        :param chair_num: The chosen customer to change order
        :param _drink: The drink to add to the customer's order
        :return: No return value
        """
        self.customers[chair_num].add_drink(_drink)

    def add_food_order(self, chair_num, _dish):
        """
        Record a dish for the customer seated at chair_num.
        :param chair_num: The chosen customer to change order
        :param _dish: The dish to add to the customer's order
        :return: No return value
        """
        self.customers[chair_num].add_food(_dish)

    def table_info(self):
        """
        Print the name of every customer seated at the table.
        :return: No return value
        """
        for guest in self.customers:
            print(guest.get_name())

    def table_total(self):
        """
        :return: The total price of each customer's orders combined
        """
        return sum((guest.get_total() for guest in self.customers), 0.0)
if __name__ == "__main__":
    # Smoke test: seat a party of four, name the reservation, place one
    # food and one drink order for the guest in chair 3, then print them.
    new_table = Table(4, 4)
    new_table.set_reservation_name("benny")
    print("reservation holder", new_table.get_reservation_name())
    new_table.add_food_order(3, menu.steak)
    new_table.add_drink_order(3, menu.sprite)
    print("customer chair #: 3")
    for item in new_table.customers[3].get_order():
        print(item.get_name())
| UTF-8 | Python | false | false | 2,818 | py | 7 | Table.py | 6 | 0.600071 | 0.596167 | 0 | 89 | 30.662921 | 78 |
feilaoda/easyspider | 3,934,190,092,209 | ee66aa81e00ca5116a59d1f2b9715be665f1ed40 | 6a551a6397c38d9a9dc43ce9378ad93e0344bddc | /setup.py | 08e65833aabcfa31e272a46848dd4bd2fb37a8f1 | [
"Apache-2.0"
]
| permissive | https://github.com/feilaoda/easyspider | d8ae018be749c76126955f75be6ffbac14a4a419 | e80b224a036141b17a21445c94de646d5e4d1482 | refs/heads/master | 2020-12-13T12:52:26.765461 | 2018-08-21T08:49:47 | 2018-08-21T08:49:47 | 37,138,321 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup, find_packages
# Packaging metadata for the coderspider distribution.
setup(
    name = "coderspider",
    version = "0.1",
    # Top-level modules shipped alongside the auto-discovered packages.
    py_modules = ["spider","dbconfig"],
    packages=find_packages(exclude=['tmp/*', 'test/*']),
    package_data = {
        # If any package contains *.txt or *.rst files, include them:
        '': ['*.yaml', '*.txt'],
        # include any *.msg files found in the 'hello' package, too:
        # 'hello': ['*.msg'],
    },
    author = "feilaoda",
    author_email = "azhenglive@gmail.com",
    url = "http://easyspider",
    description = "news for coders",
    )
| UTF-8 | Python | false | false | 590 | py | 21 | setup.py | 18 | 0.542373 | 0.538983 | 0 | 18 | 31.444444 | 69 |
alexioso/CPE365 | 6,244,882,494,850 | cac7205c6f46cd901cb68c0fde96ca1c7493b43e | 73d8f7f18cf7964510973ab82521e34fd1bb25de | /lab1/schoolsearch.py | dc652d80909e6ddec921e9464c84233d9473eba3 | []
| no_license | https://github.com/alexioso/CPE365 | 1a63f11a0e70b64c7e08eb7733b528ca15ca7a3a | be606d2be95cf5e2b7f9c77e78df799326116773 | refs/heads/master | 2021-01-19T07:43:46.288736 | 2017-04-07T15:43:00 | 2017-04-07T15:43:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Mitchel Davis
# Alex Braksator
# April 7 2017
# Lab01 CPE 365
# Trivial Requirements Satisfied
# R1 - Python runs on lab machines
# R2 - No command line parameters are needed
# R3 - Language for Search instructions is implemented See main loop and functions
import time
# Initializing arrays to store attributes of students
# Parallel arrays: index i across all eight lists describes student i.
sLast = []      # student last names
sFirst = []     # student first names
grade = []      # grade levels
classroom = []  # classroom numbers
bus = []        # bus route numbers
gpa = []        # grade point averages
tLast = []      # teacher last names
tFirst = []     # teacher first names
# Read the students.txt file and catch any exceptions
# Satisfies requirement R12
# Open students.txt, exiting gracefully if it is missing (R12).
try:
    f = open('students.txt', 'r')
except Exception:
    print("Unable to open file\n")
    exit()
# Reads through the file and fills arrays with student info.
# 'with f' guarantees the file is closed even if a malformed record
# raises; iterating the file directly avoids readlines() loading every
# line into memory at once.  Records are comma-separated:
# last,first,grade,classroom,bus,gpa,teacher_last,teacher_first
with f:
    for line in f:
        student = line.upper().split(',')
        sLast.append(student[0])
        sFirst.append(student[1])
        grade.append(student[2])
        classroom.append(student[3])
        bus.append(student[4])
        gpa.append(student[5])
        tLast.append(student[6])
        tFirst.append(student[7].rstrip())
# Helper function to search an array with a given input string
# @param arr: the array to search through
# @param str1: the search string
# @return a subset of objects from arr that match str1
def search(arr, str1):
    """Return the indices of every entry in arr equal to str1, ignoring case.

    :param arr: the array to search through
    :param str1: the search string
    :return: list of matching indices (may be empty)
    """
    target = str1.lower()
    return [i for i, value in enumerate(arr) if value.lower() == target]
# Prints the average GPA for the specified grade or gives
# error message if there are no students in that grade
# Satisfies requirement R10 and R11
# Also prints the time it took to complete the search
# @param gradenum: the grade to look for
def getavg(gradenum):
start = time.time()
gpas = search(grade, gradenum)
elapsed = time.time() - start
sum = 0
for i in gpas:
sum += float(gpa[i])
if len(gpas) == 0:
print('No students in this grade')
return
avg = sum / len(gpas)
print 'Grade: ' + gradenum + ' Average GPA: ' + str(avg)
print str(elapsed) + "\n"
return
# Prints the information outlined in R8 or gives
# error message if there are no results to show
# Satisfies requirements R8 and R11
# Also prints the time it took to complete the search
# @param busnum: the number of the bus route to search for
def bussearch(busnum):
start = time.time()
buses = search(bus, busnum)
elapsed = time.time() - start
if len(buses) == 0:
print"Bus entered yielded no results"
else:
for i in buses:
print'Student:' + sLast[i] + ", " + sFirst[i] + ' Grade: ' + grade[i] + ' Classroom: ' + classroom[i]
print str(elapsed) + "\n"
return
# Prints the student info outlined in R7 and R9 of students in the specified grade
# or an error message showing that no students are in that grade
# Satisfies requirements R7 R9 and R11
# Also prints the time it took to complete the search
# @param number: the grade number to search for
# @parm optarg: an optional argument. Represents either "H[igh]" or "L[ow]"
# default parameter value is ""
def gradesearch(number, optarg=""):
start = time.time()
grades = search(grade, number)
elapsed = time.time() - start
if len(grades) == 0:
print("Grade entered yielded no results")
elif optarg == "":
for i in grades:
print sLast[i] + ', ' + sFirst[i]
else:
if optarg == 'HIGH' or optarg == 'H':
maxGPA = 0
maxInd = -1
for j in range(len(grades)):
if float(gpa[j]) > maxGPA:
maxGPA = float(gpa[j])
maxInd = j
print 'Student:' + sLast[maxInd] + ', ' + sFirst[maxInd] + ' GPA: ' + gpa[maxInd] + ' Teacher: ' + tLast[maxInd] + ", " + tFirst[maxInd] + ' Bus: ' + bus[maxInd]
elif optarg == 'LOW' or optarg == 'L':
minGPA = 4.0
minInd = -1
for i in range(len(grades)):
if float(gpa[i]) < minGPA:
minGPA = float(gpa[i])
minInd = i
print 'Student:' + sLast[minInd] + ', ' + sFirst[minInd] + ' GPA: ' + gpa[minInd] + ' Teacher: ' + tLast[minInd] + ", " + tFirst[minInd] + ' Bus: ' + bus[minInd]
print str(elapsed) + "\n"
return
# Prints out the students with the specified teacher
# or an error message stating no teacher with that name was found
# Satisfies requirements R6 and R11
# Also prints the time it took to complete the search
# @param t_lname: the last name of the teacher to search for
def teachersearch(t_lname):
start = time.time()
teach = search(tLast, t_lname)
elapsed = time.time() - start
if len(teach) == 0:
print("Last name not found")
else:
for i in teach:
print sLast[i], ', ', sFirst[i]
print str(elapsed) + "\n"
return
# Prints the student info outlined in R4 and R5 of students with specified last name
# or an error message stating no student with that name was found
# Satisfies requirements R4 R5 and R11
# Also prints the time it took to complete the search
# @param stu_lname: the last name of the student to search for
# @parm optarg: an optional argument. Represents either "B[us]"
# default parameter value is ""
def studentsearch(stu_lname, optarg = ""):
start = time.time()
stu = search(sLast, stu_lname)
elapsed = time.time() - start
if len(stu) == 0:
print("Last name not found")
return
if optarg == "BUS" or optarg == "B":
for i in stu:
print sLast[i] + ", " + sFirst[i] + " " + bus[i]
elif optarg == "":
for i in stu:
print sLast[i], sFirst[i], ', Grade: ' + grade[i], ', Classroom: ' + classroom[i], ', Teacher:', tLast[i] + ',', tFirst[i]
print str(elapsed) + "\n"
return
# main loop of program
while True:
inp = raw_input('Enter a search Instruction or \'Q\' to quit: ')
args = inp.strip().upper().split(' ')
print
if args[0] == 'S:' or args[0] == 'STUDENT:':
option = ""
if len(args) == 3:
option = args[2]
if len(args) > 3:
continue
studentsearch(args[1], option)
elif args[0] == 'T:' or args[0] == 'TEACHER:':
if len(args) > 2:
continue
teachersearch(args[1])
elif args[0] == 'G:' or args[0] == 'GRADE:':
number = args[1]
option = ""
if len(args) == 3:
option = args[2]
if len(args) > 3:
continue
gradesearch(number, option)
elif args[0] == 'B:' or args[0] == 'BUS:':
if len(split) > 2:
continue
else:
bussearch(args[1])
elif args[0] == 'A:' or args[0] == 'AVERAGE:':
if len(args) > 2:
continue
getavg(args[1])
elif args[0] == 'Q' or args[0] == 'QUIT':
break;
else:
print('Enter a valid instruction')
exit()
| UTF-8 | Python | false | false | 6,920 | py | 2 | schoolsearch.py | 1 | 0.584827 | 0.572254 | 0 | 230 | 29.086957 | 177 |
openwns/wimemac | 2,619,930,075,762 | f553f4f8d5de9f32ee057a7b707a828d04288cfa | aa858436e0884ffb1674631976b334e4abf0f232 | /PyConfig/wimemac/evaluation/default.py | 34ace252ec899fc1f3910cc6dfb3f3ac7fa42afe | []
| no_license | https://github.com/openwns/wimemac | aaaa760776b0cabb2fa0dd0c6decd09834990de9 | 11d70674338de976b172823c2f036f2f74045e44 | refs/heads/master | 2021-01-18T13:42:52.519942 | 2014-06-29T23:27:19 | 2014-06-29T23:27:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2011
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openwns.evaluation import *
def installEvaluation(sim, loggingStations):
sourceName = 'wimemac.ARQTransmissionAttempts'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Transmission attempts',
minXValue = 0.0,
maxXValue = 15.0,
resolution = 14))
sourceName = 'wimemac.timeBufferEmpty'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Transmission attempts',
minXValue = 0.0,
maxXValue = 0.02,
resolution = 20000))
sourceName = 'wimemac.packetErrorRate'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Packet error rate',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 1000))
sourceName = 'wimemac.crcLoss'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Loss ratio in CRC',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 1000))
sourceName = 'wimemac.unicastBufferLoss'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Loss ratio in unicast buffer',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 1000))
sourceName = 'wimemac.broadcastBufferLoss'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Loss ratio in broadcast buffer',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 1000))
sourceName = 'wimemac.unicastBufferSize'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Unicast buffer size',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 20))
sourceName = 'wimemac.broadcastBufferSize'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Broadcast buffer size',
minXValue = 0.0,
maxXValue = 1.0,
resolution = 20))
for where in [ 'unicastTop', 'broadcastTop', 'bottom' ]:
for direction in [ 'incoming', 'outgoing', 'aggregated' ]:
for what in [ 'bit', 'compound' ]:
sourceName = 'wimemac.%s.window.%s.%sThroughput' % (where, direction, what)
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(TimeSeries())
for where in [ 'unicastTop', 'broadcastTop' ]:
sourceName = 'wimemac.%s.packet.incoming.delay' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming packet delay (%s)' % where,
minXValue = 0.0,
maxXValue = 0.001,
resolution = 1000))
sourceName = 'wimemac.%s.packet.outgoing.delay' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Outgoing packet delay (%s)' % where,
minXValue = 0.0,
maxXValue = 0.001,
resolution = 1000))
sourceName = 'wimemac.%s.packet.incoming.bitThroughput' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming bit throughput (%s)' % where,
minXValue = 0.0,
maxXValue = 800000000.0,
resolution = 1000))
sourceName = 'wimemac.%s.packet.incoming.size' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming packet size (%s)' % where,
minXValue = 0.0,
maxXValue = 15000.0,
resolution = 1000))
sourceName = 'wimemac.%s.packet.outgoing.size' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Outgoing packet size (%s)' % where,
minXValue = 0.0,
maxXValue = 15000.0,
resolution = 1000))
for where in [ 'bottom' ]:
sourceName = 'wimemac.%s.packet.incoming.delay' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming packet delay (%s)' % where,
minXValue = 0.0,
maxXValue = 0.000001,
resolution = 1000))
sourceName = 'wimemac.%s.packet.outgoing.delay' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Outgoing packet delay (%s)' % where,
minXValue = 0.0,
maxXValue = 0.000001,
resolution = 1000))
sourceName = 'wimemac.%s.packet.incoming.bitThroughput' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming bit throughput (%s)' % where,
minXValue = 0.0,
maxXValue = 1000000000.0,
resolution = 1000))
sourceName = 'wimemac.%s.packet.incoming.size' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Incoming packet size (%s)' % where,
minXValue = 0.0,
maxXValue = 16000.0,
resolution = 1000))
sourceName = 'wimemac.%s.packet.outgoing.size' % where
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Accept(by = 'wns.node.Node.id', ifIn = loggingStations))
node.getLeafs().appendChildren(PDF(name = sourceName,
description = 'Outgoing packet size (%s)' % where,
minXValue = 0.0,
maxXValue = 16000.0,
resolution = 1000))
def installMIHEvaluation(sim, loggingStations):
sourceName = 'wimemac.linkDetectedTriggerLevel'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Separate(by = 'wns.node.Node.id', forAll = loggingStations, format="wns.node.Node.id%d"))
node.getLeafs().appendChildren(TimeSeries())
sourceName = 'wimemac.linkDownTriggerLevel'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Separate(by = 'wns.node.Node.id', forAll = loggingStations, format="wns.node.Node.id%d"))
node.getLeafs().appendChildren(TimeSeries())
sourceName = 'wimemac.berLevel'
node = openwns.evaluation.createSourceNode(sim, sourceName)
node.appendChildren(Separate(by = 'wns.node.Node.id', forAll = loggingStations, format="wns.node.Node.id%d"))
node.getLeafs().appendChildren(TimeSeries())
| UTF-8 | Python | false | false | 11,575 | py | 114 | default.py | 113 | 0.555248 | 0.535637 | 0 | 222 | 51.13964 | 113 |
Jill0fAllTrades/Python_Crash_Course | 18,966,575,600,870 | 8b7c1d63d8e3b723d11701e61bb5995a224cd691 | 8ec920d3d56ebe8c6bfd99f575cc89983bf22a50 | /chapter_2/names.py | ae607d4a333999c349147a42d1278ad10c1e4271 | []
| no_license | https://github.com/Jill0fAllTrades/Python_Crash_Course | ec19a729a9d6feeff74949d0b40609100e1f1c4a | 44862d544dede7ba29a7c6139e5c17c56419def5 | refs/heads/main | 2023-08-04T19:41:43.231426 | 2021-09-14T23:41:14 | 2021-09-14T23:41:14 | 391,203,416 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | names = ['Kathy', 'Josh', 'Cassie']
print("Hello " + names[0] + "!")
print("Hello " + names[1] + "!")
print("Hello " + names[-1] + "!")
| UTF-8 | Python | false | false | 137 | py | 74 | names.py | 74 | 0.49635 | 0.474453 | 0 | 4 | 33 | 35 |
DransyHe/Computing | 19,628,000,564,942 | e3469f9243c65a7565d270029c838c533bdcf0fb | afcdc581aa59e4c1692a67f40564a118e2deeae4 | /3_5.py | f192924d580d437dbaba0ccc5fd2b3d20b3f35b1 | []
| no_license | https://github.com/DransyHe/Computing | 1088a6a14c5876e57fca1b8ef3bff55755d9e5ce | c4897654ce31c9c23d44adb808eee219eb338e09 | refs/heads/master | 2020-03-03T21:10:25.759099 | 2017-03-05T08:38:38 | 2017-03-05T08:38:38 | 83,552,743 | 0 | 0 | null | false | 2017-03-05T08:38:39 | 2017-03-01T12:35:43 | 2017-03-01T12:43:38 | 2017-03-05T08:38:39 | 3 | 0 | 0 | 0 | Python | null | null | f=open("shakespeare-romeo-48.txt","r")
fw=open("R_&_J_Word_Frequencies.txt","w")
words=[]
volcab=[]
count=[]
for line in f:
k=0
while k<len(line):
while (not ((line[k]>="a" and line[k]<="z") or (line[k]>="A" and line[k]<="Z"))):
k=k+1
if k==len(line):
break
if k==len(line):
break
x=""
while (line[k]>="a" and line[k]<="z") or (line[k]>="A" and line[k]<="Z"):
x=x+line[k]
k=k+1
if x in volcab:
count[volcab.index(x)]=count[volcab.index(x)]+1
else:
volcab.append(x)
count.append(1)
for i in range(0,len(volcab)):
fw.write(volcab[i])
fw.write(",")
fw.write(str(count[i]))
fw.write("\n")
| UTF-8 | Python | false | false | 770 | py | 23 | 3_5.py | 20 | 0.463636 | 0.453247 | 0 | 29 | 25.551724 | 89 |
Jjiya/python_openTutorials | 6,975,026,930,436 | 9bb58a48f809e3fd8735d5896250b5d1b7d1345f | f6ca3a0263a1e9a25a948491c30b896ebb951c8d | /syntax/list.py | 42203a5867fbbf0a7b9c32884e5709a97cf3c0e2 | []
| no_license | https://github.com/Jjiya/python_openTutorials | 78d947a560ca592be3136532f090d12f7b6797df | a9a30fcd59ca42e72a3034b423df4706aff8bf03 | refs/heads/master | 2023-08-31T23:08:31.991010 | 2021-11-01T14:08:10 | 2021-11-01T14:08:10 | 370,041,300 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | person = {"name":"Jjiya","age":0}
print(person["name"])
# print(person.name) -> 파이썬은 객체에 접근할 때, person.key가 아니라 배열 가져오듯이 해야되네... 이렇게하면 AttributeError: 'dict' object has no attribute 'name' 에러남
| UTF-8 | Python | false | false | 265 | py | 14 | list.py | 14 | 0.680203 | 0.675127 | 0 | 4 | 48.25 | 139 |
NSLS-II-SMI/ipython_ophyd | 11,871,289,614,642 | 05f9dd675388806e18e3654176347b21158a74ea | b850b803bcd776631eb8619ff74a9d2134cc621d | /startup/users/33-oleg.py | 2da609a9823f9de62512166a581bddc485e93035 | [
"BSD-3-Clause"
]
| permissive | https://github.com/NSLS-II-SMI/ipython_ophyd | 2ac5daa7f530e4900d2265b25ffdd71dceccf2f2 | 70903ca91934ef849c999fe4b4f6607dac21db1f | refs/heads/master | 2021-01-20T18:20:56.793985 | 2021-01-15T20:24:49 | 2021-01-15T20:24:49 | 60,647,450 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | ####line scan
def aaron_rot(t=8):
sample_id(user_name='AM', sample_name='tetrahedral')
det_exposure_time(t)
yield from bp.inner_product_scan([pil1M], 24, prs, 45, 22, stage.x, 0.23, 0.15, piezo.y, -1792.6, -1792.6)
yield from bp.inner_product_scan([pil1M], 22, prs, 21, 0, stage.x, 0.15, 0.11, piezo.y, -1792.6, -1792.6)
yield from bp.inner_product_scan([pil1M], 11, prs, -1, -11, stage.x, 0.11, 0.1, piezo.y, -1792.6, -1792.1)
yield from bp.inner_product_scan([pil1M], 11, prs, -12, -22, stage.x, 0.1, 0.1, piezo.y, -1792.1, -1791.6)
yield from bp.inner_product_scan([pil1M], 11, prs, -23, -33, stage.x, 0.1, 0.114, piezo.y, -1791.6, -1790.9)
yield from bp.inner_product_scan([pil1M], 12, prs, -34, -45, stage.x, 0.114, 0.134, piezo.y, -1790.9, -1790.9)
def brian_caps(t=1):
x_list = [-36500, -30150, -23800, -17450, -11100, -4750, 1600, 7950, 14400, 20700, 27050, 33400, 39850]#
y_list = [ 0, 0, 0, 0, 0, 0, 0, 0, 0]
samples = [ 'test', 'LC-O36-6','LC-O36-7','LC-O36-8','LC-O36-9','LC-O37-6','LC-O37-7','LC-O37-8','LC-O37-9']
# Detectors, motors:
dets = [pil1M]
y_range = [0, 0, 1]
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t,t)
for x, y, sample in zip(x_list,y_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
sample_id(user_name='BM', sample_name=sample)
#yield from bp.scan(dets, piezo.y, *y_range)
yield from bp.count(dets, num=1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def brian_caps_2020_3(t=1):
# samples = ['buffer1', 'GB01', 'GB02', 'GB03', 'GB04', 'GB05', 'GB06', 'GB08', 'GB09', 'GB10', 'GB11', 'GB12']
# samples = ['Y01', 'Y02', 'Y03', 'Y04', 'Y05', 'Y06']
# x_list = [-22300, -18600, -11000, -4500, 2300, 8500, 14500, 21000, 27500, 33800, 40300, 46700]
# y_list = [2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500]
# z_list = [4000, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500]
samples = ['S1_43', 'S1_44', 'S1_45', 'S1_46', 'S1_47', 'S1_48', 'S1_49', 'S1_50', 'S1_51', 'S1_52', 'S1_53', 'S1_54', 'S1_55', 'S1_56', 'S1_57',
'S1_58', 'S1_59', 'S1_60', 'S1_61', 'S1_62', 'S2_63', 'S2_67', 'S2_68', 'S2_69', 'S2_70', 'S2_71']
x_list = [-39100, -32820, -26400, -20240, -13880, -7020, -720, 5390, 11680, 18180, 24560, 31040, 37360, 43820, -37780,
-31530, -24530, -17840, -12100, -5800, 790, 7170, 13000, 19420, 25840, 32260]
y_list = [ 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
z_list = [ 12500, 12500, 12500, 12500, 12500, 12500, 12500,12500, 12500, 12500, 12500, 12500, 12500, 12500, 2000,
2000, 2000, 2000, 2000, 2000,2000, 2000, 2000, 2000, 2000, 2000]
# Detectors, motors:
dets = [pil1M]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of Z coord ({len(z_list)})'
ypos = [0, 50, 2]
det_exposure_time(t,t)
for x, y, z, sample in zip(x_list,y_list,z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
sample_id(user_name='BM', sample_name=sample + '_test_18.25keV')
# yield from bp.rel_scan(dets, piezo.y, *ypos)
yield from bp.count(dets, num=240)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def brian_caps(t=1):
samples = ['sample44_1', 'sample44_2', 'sample45_1', 'sample45_2','sampleB_1', 'sampleB_2','sampleB_3', 'sampleP_1',
'sampleP_2'
]
x_list = [-41000, -34350, -28400, -22000, -15700, -9350, -2700, 3400,
19200
]
y_list = [7600, 7600, 7700, 8000, 7800, 7500, 7500, 7500,
7500
]
z_list = [9600,9600,9600,9600,9600,9600,9600,9600,
2600
]
# Detectors, motors:
dets = [pil1M]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of Z coord ({len(z_list)})'
ypos = [0, 50, 2]
det_exposure_time(t,t)
for x, y, z, sample in zip(x_list,y_list,z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
sample_id(user_name='BM', sample_name=sample)
# yield from bp.rel_scan(dets, piezo.y, *ypos)
yield from bp.count(dets, num=1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def run_mesh_aaron(t=1):
name = 'AM'
dets = [pil1M]
det_exposure_time(t,t)
# samples = ['sample_b1_area1', 'sample_b1_area2']
# x_list = [45365, 46145]
# y_list = [-1865, -1895]
# x_range=[[0,150,7], [0,200,9]]
# y_range=[[0,150,76],[0,150,76]]
samples = ['sample_b1_area1_1','sample_b1_area2_1', 'sample_b2_area1', 'sample_b2_area2', 'sample_c1_area1', 'sample_c1_area2',
'sample_c2_area1', 'sample_t1_area1', 'sample_t1_area2']
x_list = [45423, 46344, 22765, 22415, 2040, 540, -19755, -43785, -42785]
y_list = [-2035, -2135, -1165, -1765, -590, -1770, -1095, 480, -120]
x_range=[[0,150,7],[0,150,7], [0,250,13], [0,200,9], [0,300,13],[0,300,13],[0,300,13],[0,500,21],[0,300,13], [0,150,7]]
y_range=[[0,200,101], [0,150,76],[0,150,76],[0,150,76],[0,300,151],[0,200,101],[0,200,101],[0,300,151],[0,200,101],[0,150,76]]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
for x, y, sample, x_r, y_r in zip(x_list, y_list, samples, x_range, y_range):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
name_fmt = '{sam}'
sample_name = name_fmt.format(sam=sample)
sample_id(user_name=name, sample_name=sample_name)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bp.rel_grid_scan(dets, piezo.y, *y_r, piezo.x, *x_r, 0)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
| UTF-8 | Python | false | false | 6,965 | py | 47 | 33-oleg.py | 46 | 0.562383 | 0.388658 | 0 | 143 | 47.706294 | 149 |
jkrclaro/megaphonely | 10,703,058,519,802 | 8ac0145df4f5af9f2671ce72332949acb707f575 | aa2bb4ef0852029ab88c7caa06d511593c0e1503 | /megaphonely/billing/admin.py | 3af4dbce9f0d649b696533f7101faf993fa1bd1d | []
| no_license | https://github.com/jkrclaro/megaphonely | 0c25ee4c7f0658751f21d2b1df02d52c6283b1aa | 7e6bb5941317d9731e7e14cb1d338e3d6bd54ec8 | refs/heads/master | 2021-03-22T05:13:26.110620 | 2019-12-12T17:28:51 | 2019-12-12T17:28:51 | 104,248,145 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import (Customer, PaymentMethod, Subscription, Plan)
admin.site.register(Customer)
admin.site.register(PaymentMethod)
admin.site.register(Subscription)
admin.site.register(Plan)
| UTF-8 | Python | false | false | 226 | py | 90 | admin.py | 39 | 0.823009 | 0.823009 | 0 | 8 | 27.25 | 65 |
NicolasBertrand/tdcpb-tools | 10,402,410,821,315 | ef24b74313179898d6d69587fc046aace6f2a4d8 | 5150fcd88662e11b44a13754c6c07a739bae1451 | /bin/tdcpb-make-torrent | 71b54c22e48441dd1f079ef9fe42a0a7fa528d9d | []
| no_license | https://github.com/NicolasBertrand/tdcpb-tools | 154ac7a8f66265fde094661b00f28ec96cc7b723 | 848f28da6d2817fcff75bf4438d0c31f49a26cb6 | refs/heads/master | 2020-07-18T23:01:58.624249 | 2017-06-14T15:44:51 | 2017-06-14T15:44:51 | 94,329,618 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# -*- Mode: Python -*-
# vim:si:ai:et:sw=4:sts=4:ts=4
#
#
# Copyright Nicolas Bertrand (nico@isf.cc), 2014
#
# This file is part of DcpIngest.
#
# DcpIngest is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Luciole is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Luciole. If not, see <http://www.gnu.org/licenses/>.
#
#
# Usage:
# TBD
import Queue
import argparse
import threading
import time
import subprocess as SP
import os
import logging
import sys
import tdcpblib.di_parser as T_PARSER
from tdcpblib.common import TdcpbException
TRACKER="http://10.10.10.31:2710/announce"
COMMENT="Created with a slice of lemon pie"
class TorrentCreatorThread (threading.Thread):
def __init__(self, torrent_path, dcp_path, q):
threading.Thread.__init__(self)
#self.threadID = threadID
self.torrent_path = torrent_path
logging.debug("torrent path {}".format(torrent_path))
self.dcp_path = dcp_path
self.announce = TRACKER
self.torrent_comment = COMMENT
self.q = q
def run(self):
logging.debug("Starting Torrent creation")
self.create_torrent()
logging.debug("Exiting torrent creation")
def create_torrent(self):
#_cmd = "/usr/bin/transmission-create -p -t \"{}\" -o \"{}\" -c \"{}\" {}"\
# .format(self.announce, self.torrent_path, self.torrent_comment, self.dcp_path)
_cmd = [
"/usr/bin/transmission-create",
"-p", # private torrent
"-t", # tracker
self.announce,
"-o", # output torrent
self.torrent_path,
"-c", # comment
self.torrent_comment,
self.dcp_path
]
logging.debug("torrent command: {}".format(" ".join(_cmd)))
_sp = SP.Popen(_cmd, stdout=SP.PIPE, stderr=SP.PIPE)
_stdout, _stderr = _sp.communicate()
logging.debug(_stdout)
logging.debug(_stderr)
if not _stderr:
# set torrent readable by all
os.chmod(self.torrent_path, 0644)
self.q.put("{}:TORRENT_OK".format(self.dcp_path))
else:
self.q.put("{}:TORRENT_KO".format(self.dcp_path))
print _stderr
def tdcpb_make_torrent(p_in_path, p_out_torrent_dir):
if not os.path.exists(p_in_path):
_err = "DCP {} does not exists".format(p_in_path)
raise TdcpbException(_err)
if not os.path.exists(p_out_torrent_dir):
_err = "Torrent directory {} does not exists".format(p_out_torrent_dir)
raise TdcpbException(_err)
in_path = os.path.abspath(p_in_path)
torrent_base = os.path.basename(in_path)
torrent_file = os.path.join(os.path.abspath(p_out_torrent_dir),
torrent_base + ".torrent")
logging.debug("Torrent path: {}".format(torrent_file))
workQueue = Queue.Queue(10)
thread1 = TorrentCreatorThread(torrent_file, in_path, workQueue)
thread1.start()
logging.info("Started torrent creation for {}".format(os.path.basename(os.path.abspath(p_in_path))))
while True:
if not workQueue.empty():
data = workQueue.get()
data = data.split(':')
if data[1] == "TORRENT_OK":
logging.info("torrent generation OK for {}"\
.format(os.path.basename(os.path.abspath(p_in_path))))
break
elif data[1] == "TORRENT_KO":
_err = "torrent generation failed for {}"\
.format(os.path.basename(os.path.abspath(p_in_path)))
raise TdcpbException(_err)
logging.debug("Torrent creation in progress...")
time.sleep(5)
# checck that torrent really exist
if not os.path.exists(torrent_file):
_err = "Something goes wrong during torrent creation for {}"\
.format(os.path.basename(os.path.abspath(p_in_path)))
raise TdcpbException(_err)
logging.debug("Torrent generation of {} finished (stored in {})"\
.format(os.path.basename(os.path.abspath(p_in_path)), torrent_file))
def main(argv):
parser = argparse.ArgumentParser(description='Create torents')
parser.add_argument('in_path',
metavar='PATH',
type = str,
nargs = "?",
help = 'Input path to a file or directory' )
parser.add_argument('out_torrent_dir',
metavar='TORRENT_PATH_DIR',
type = str,
nargs = "?" ,
help = "Directory path where the folder will be stored." )
parser.add_argument('-d', '--debug', dest='debug', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='debug mode')
args = parser.parse_args()
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(message)s - %(filename)s %(funcName)s line %(lineno)d thread %(thread)d/%(threadName)s',
level=args.debug)
try:
tdcpb_make_torrent(args.in_path, args.out_torrent_dir)
except TdcpbException as _err:
logging.error(_err)
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| UTF-8 | Python | false | false | 5,728 | 18 | tdcpb-make-torrent | 15 | 0.596369 | 0.590433 | 0 | 156 | 35.705128 | 154 |
|
nikhilkmr300/pso-solver | 1,245,540,561,122 | f2701ad35dbb8cd083485f2052bcedc06fe30fe6 | d014fef83d639ca02e1b4ce10bbc6e51836b5120 | /tests/tests_bivariate/test_sphere.py | 455ab25055221d21af356561ea2d93eb979ca849 | [
"MIT"
]
| permissive | https://github.com/nikhilkmr300/pso-solver | febebff884ecca54a1021ada2b4f2b14934f231f | 77b471e3fd85788aacaa5d19e53022333c48ea2d | refs/heads/master | 2022-12-28T03:19:34.372259 | 2020-10-06T19:40:41 | 2020-10-06T19:40:41 | 274,802,709 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Refer: https://en.wikipedia.org/wiki/Test_functions_for_optimization
import numpy as np
import matplotlib.pyplot as plt
import pso_solver
pso_solver.setSeed(1)
lower = xlower = ylower = -50
upper = xupper = yupper = 50
particleList = pso_solver.createRandomParticleList(2, numParticles=10, lower=lower, upper=upper)
# Testing on sphere function
f = lambda x, y: x**2 + y**2
pso_solver.psoVisualizeBivariate(particleList, f, xlower, xupper, ylower, yupper, c1=1, c2=1, W=0.5, numIters=30, maxFlag=False, sleepTime=0.1, density=100, accuracy=2, verbose=False)
| UTF-8 | Python | false | false | 565 | py | 14 | test_sphere.py | 11 | 0.750442 | 0.707965 | 0 | 16 | 34.3125 | 183 |
sailesh-b/nakshatra-official | 14,113,262,538,431 | 35c8ebe9ef8759862b01c2ecaf1455d8990a2140 | 5a3d9eef90215f077ceb9f54153f4baccf8bdcca | /app/urls.py | ed3f90e06cad69cccfa63668eeff210c6e618b82 | []
| no_license | https://github.com/sailesh-b/nakshatra-official | 2ffe9861bb901f5b56e84f6ed28a9b4fa52d2adc | 2cde8b1114c74d4d239e9653a538775ea55f70f3 | refs/heads/main | 2023-06-14T05:25:31.828425 | 2021-07-07T14:04:00 | 2021-07-07T14:04:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
from .api import apiViews
urlpatterns=[
path("product/new/", apiViews.new, name="newProduct"),
path("products/<int:id>", apiViews.delete, name="deleteProduct"),
path("products/edit/<int:id>/", apiViews.edit, name="editProduct"),
path("products/", apiViews.all, name="showAll"),
path("api/product/<int:id>", apiViews.productsApi.as_view(), name="productDetails"),
]
urlpatterns += [
path("index", views.index),
path("contact",views.contact),
path("login",views.login),
path("work/<int:id>",views.work)
]
| UTF-8 | Python | false | false | 592 | py | 8 | urls.py | 6 | 0.668919 | 0.668919 | 0 | 19 | 30.105263 | 88 |
readdy/readdy | 8,804,682,964,191 | 534dad257d74069b4dade48e673ce6c298f31e82 | 632a642c241fe5f40d0263e5cff4d9ea5e30bc59 | /wrappers/python/src/python/readdy/tests/test_topology_graphs.py | 89caba33ede97d7b3286d5ec258d7de73b498ed7 | [
"BSD-3-Clause"
]
| permissive | https://github.com/readdy/readdy | b03bbcc8aabc634ed8dd31be4eaca206e1408248 | 97901fb4fdb1f708b31399a5c7b33c316769727f | refs/heads/master | 2022-07-20T05:58:11.614900 | 2022-07-11T13:59:29 | 2022-07-11T13:59:29 | 59,560,469 | 50 | 15 | BSD-3-Clause | false | 2022-07-11T13:59:31 | 2016-05-24T09:41:44 | 2022-05-30T07:57:11 | 2022-07-11T13:59:30 | 7,485 | 48 | 12 | 13 | C++ | false | false | # coding=utf-8
# Copyright © 2018 Computational Molecular Biology Group,
# Freie Universität Berlin (GER)
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Created on 24.03.17
@author: clonker
"""
from __future__ import print_function
import unittest
import numpy as np
import readdy._internal.readdybinding.common as common
from readdy._internal.readdybinding.api import BondedPotentialConfiguration
from readdy._internal.readdybinding.api import ParticleTypeFlavor
from readdy._internal.readdybinding.api import Simulation
from readdy._internal.readdybinding.api import Context
from readdy.util.testing_utils import ReaDDyTestCase
class TestTopologyGraphs(ReaDDyTestCase):
    """Tests for topology particle graphs and their bond configuration."""
    def test_sanity(self):
        """Build a 4-particle linear chain topology and check positions,
        neighbor counts and adjacency of every vertex."""
        context = Context()
        context.box_size = [10., 10., 10.]
        context.topologies.add_type("TA")
        context.particle_types.add("T", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
        context.topologies.configure_bond_potential("T", "T", BondedPotentialConfiguration(10., 11., "harmonic"))
        sim = Simulation("SingleCPU", context)
        np.testing.assert_equal(sim.kernel_supports_topologies(), True)
        # One particle at x = 0, 1, 2, 3 along the x axis.
        particles = [sim.create_topology_particle("T", common.Vec(x, 0, 0)) for x in range(4)]
        top = sim.add_topology("TA", particles)
        graph = top.graph
        # Connect the chain 0 - 1 - 2 - 3.
        graph.add_edge(0, 1)
        graph.add_edge(1, 2)
        graph.add_edge(2, 3)
        np.testing.assert_equal(len(graph.get_vertices()), 4)
        for v in graph.vertices:
            if v.particle_index == 0:
                # Chain end: exactly one neighbor (particle 1).
                np.testing.assert_equal(top.position_of_vertex(v), common.Vec(0, 0, 0))
                np.testing.assert_equal(len(v.neighbors()), 1)
                neigh = v.neighbors()[0]
                np.testing.assert_(neigh in graph.vertices)
                # NOTE(review): this branch accesses vv.particle_index directly
                # while the branches below use vv.get().particle_index --
                # confirm which accessor the binding actually exposes.
                np.testing.assert_equal(1 in [vv.particle_index for vv in v], True)
            if v.particle_index == 1:
                # Interior vertex: neighbors are particles 0 and 2.
                np.testing.assert_equal(top.position_of_vertex(v), common.Vec(1, 0, 0))
                np.testing.assert_equal(len(v.neighbors()), 2)
                np.testing.assert_equal(0 in [vv.get().particle_index for vv in v], True)
                np.testing.assert_equal(2 in [vv.get().particle_index for vv in v], True)
            if v.particle_index == 2:
                # Interior vertex: neighbors are particles 1 and 3.
                np.testing.assert_equal(top.position_of_vertex(v), common.Vec(2, 0, 0))
                np.testing.assert_equal(len(v.neighbors()), 2)
                np.testing.assert_equal(1 in [vv.get().particle_index for vv in v], True)
                np.testing.assert_equal(3 in [vv.get().particle_index for vv in v], True)
            if v.particle_index == 3:
                # Chain end: exactly one neighbor (particle 2).
                np.testing.assert_equal(top.position_of_vertex(v), common.Vec(3, 0, 0))
                np.testing.assert_equal(len(v.neighbors()), 1)
                np.testing.assert_equal(2 in [vv.get().particle_index for vv in v], True)
        top.configure()
        sim.run(0, 1)
    def test_unconnected_graph(self):
        """A topology whose graph is not connected must fail configure()."""
        context = Context()
        context.topologies.add_type("TA")
        context.box_size = [10., 10., 10.]
        context.particle_types.add("T", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
        context.topologies.configure_bond_potential("T", "T", BondedPotentialConfiguration(10, 11, "harmonic"))
        sim = Simulation("SingleCPU", context)
        np.testing.assert_equal(sim.kernel_supports_topologies(), True)
        particles = [sim.create_topology_particle("T", common.Vec(0, 0, 0)) for _ in range(4)]
        top = sim.add_topology("TA", particles)
        graph = top.get_graph()
        # Vertex 3 is deliberately left without any edge -> unconnected graph.
        graph.add_edge(0, 1)
        graph.add_edge(1, 2)
        with (np.testing.assert_raises(ValueError)):
            top.configure()
    def test_unbonded_edge(self):
        """An edge between particle types without a configured bond potential
        must fail configure()."""
        context = Context()
        context.box_size = [10., 10., 10.]
        context.topologies.add_type("TA")
        context.particle_types.add("T", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
        context.particle_types.add("D", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
        # Only T-T bonds are configured; the T-D bond is deliberately missing.
        context.topologies.configure_bond_potential("T", "T", BondedPotentialConfiguration(10., 11., "harmonic"))
        sim = Simulation("SingleCPU", context)
        np.testing.assert_equal(sim.kernel_supports_topologies(), True)
        particles = [sim.create_topology_particle("T", common.Vec(0, 0, 0)) for _ in range(3)]
        particles.append(sim.create_topology_particle("D", common.Vec(0, 0, 0)))
        top = sim.add_topology("TA", particles)
        graph = top.get_graph()
        graph.add_edge(0, 1)
        graph.add_edge(1, 2)
        graph.add_edge(2, 3)
        with (np.testing.assert_raises(ValueError)):
            top.configure()
# Allow running this test module directly (outside of a test runner).
if __name__ == '__main__':
    unittest.main()
| UTF-8 | Python | false | false | 6,211 | py | 334 | test_topology_graphs.py | 300 | 0.659526 | 0.641488 | 0 | 133 | 45.684211 | 113 |
Tynukua/getManga | 4,286,377,378,716 | 4dd88824c03d789614d05c3b2eb1ba59f45a5e23 | e4cbae3bf90f514eaf578bc555bc1f2b615276e3 | /getmanga/parsers/readmanga.py | a369300631ecc109b25de45369a3a05836b91993 | [
"MIT"
]
| permissive | https://github.com/Tynukua/getManga | 875694f04d94e25cc4548d913ff45a582b3b929e | 8cc5b090ec3dfcc6cfa7db3ce9e5220e7ef54d2b | refs/heads/master | 2021-06-24T15:14:13.478561 | 2021-01-13T17:38:46 | 2021-01-13T17:38:46 | 197,462,493 | 3 | 0 | MIT | false | 2020-01-03T13:53:18 | 2019-07-17T21:03:15 | 2019-11-20T20:41:33 | 2020-01-03T13:51:34 | 68 | 0 | 0 | 0 | Python | false | false | import re, json
import asyncio
import io
from aiohttp import ClientSession
from scrapy import Selector
from ..loader import AsyncLoader
from .abcparser import ParserABC
# Captures the manga id from a readmanga.live / mintmanga.live URL (the
# character classes cover both domain spellings at once).
mangaidregexp = re.compile(r'https:\/\/[rm][ei][an][dt]manga.live\/([^\/]+)')
# Extracts the argument list of the reader page's JS init(...) call.
scriptparser = re.compile('init\((.+)\)')
class ReadManga(ParserABC):
    """Parser for readmanga/mintmanga: manga metadata and page image URLs."""
    @classmethod
    def urlparse(cls, url: str)->str:
        """Return the manga id extracted from *url*, or None if no match."""
        match = mangaidregexp.search(url)
        if match:
            return match.group(1)
    async def parse_info(self):
        """Fetch the manga title page and fill title, description, authors
        and the volume/chapter table of contents on self._manga.
        Raises ValueError on a non-200 HTTP response."""
        async with ClientSession() as session:
            domain = self._manga._domain
            _id = self._manga._id
            async with session.get(f'https://{domain}/{_id}') as resp:
                if resp.status != 200:
                    raise ValueError(
                        f"https://{domain}/{_id} STATUS {resp.status}")
                self._page = await resp.text()
        selector = Selector(text=self._page)
        self._manga.title = selector.css('.name::text').get()
        self._manga.description = selector.css(
            'div.manga-description::text').get().strip()
        #parse authors: every person link minus the translator links
        persons = selector.css('a.person-link::text').getall()
        translatos = selector.css(
            'span.elem_translator a.person-link::text').getall()
        for i in translatos:
            persons.remove(i)
        self._manga.authors = persons
        # NOTE(review): bare expression below has no effect -- likely leftover.
        self._manga
        # parse contents: chapter links look like .../vol<V>/<C>, the regex
        # yields a flat list [vol, ch, vol, ch, ...] in page (newest-first) order
        l = selector.xpath("//td[@class=' ']/a").xpath('@href').re(
            r'/vol(\d+)/(\d+)')
        self._manga.last_volume = l[0]
        self._manga.last_chapter = l[1]
        while l:
            vol = l.pop(0)
            ch = l.pop(0)
            if not vol in self._manga.contents:
                self._manga.contents[vol] = []
            self._manga.contents[vol].append(ch)
    def __furl(self, vol, ch):
        # Build the reader URL for one chapter (mtr=1 skips the mature gate).
        return 'https://{domain}/{manga_id}/vol{vol}/{ch}?mtr=1'.format(
            domain = self._manga._domain,
            manga_id=self._manga._id,
            vol=vol,
            ch=ch
        )
    def __check(self, vol, ch):
        # Validate a vol/ch pair against the parsed table of contents.
        # vol=None means "everything"; ch=None means "whole volume".
        if vol is None: return 1
        elif vol in self._manga.contents:
            if ch is None or ch in self._manga.contents[vol]:
                return 1
        raise ValueError(f"No such volume or chapter {vol}-{ch}")
    async def parse_images(self, vol = None, ch = None):
        """Return the image URLs for one chapter, one volume (ch=None) or the
        whole manga (vol=None and ch=None). Requires parse_info() first."""
        self.__check(vol,ch) #TODO: check aioblocking
        async with ClientSession() as session:
            if not vol is None and not ch is None:
                # Single chapter: fetch and parse directly.
                async with session.get(self.__furl(vol,ch)) as resp:
                    text = await resp.text()
                    return self.__parse_images(text)
            elif not vol is None and ch is None:
                urls=[self.__furl(vol,ch) for ch in self._manga.contents[vol]]
            elif vol is None and ch is None:
                urls = []
                for vol_i in self._manga.contents:
                    urls+=[self.__furl(vol_i,ch) for ch in self._manga.contents[vol_i]]
            # Fetch many chapter pages concurrently (at most 20 workers);
            # each page body is collected into its own StringIO buffer.
            al = AsyncLoader(min((len(urls), 20)),session=session)
            urls = [(u,io.StringIO()) for u in urls]
            al.put(urls)
            al.start()
            await al.wait()
            imgs = []
            loop = asyncio.get_running_loop()
            for _,ss in urls:
                ss.seek(0)
                # Read the buffered page body off the event loop thread.
                sb = await loop.run_in_executor(None,ss.read,None)
                ss.close()
                imgs += self.__parse_images(sb)
            return imgs
    def __parse_images(self, text):
        # Locate the <script> carrying the init(...) call, then parse its
        # argument list as JSON; each entry is a URL split into parts.
        selector = Selector(text= text)
        for script in selector.css('script').getall():
            if 'init' in script: break
        else:
            raise ValueError("Script not found")
        match = scriptparser.search(script)
        if not match:
            raise ValueError("Script not parsed")
        fargs = match.group(1)
        fargs = '[' + fargs.replace("'", '"').strip() + ']'
        imgs_splited = json.loads(fargs)[0]
        # The first three elements of each entry concatenate to the image URL.
        imgs = [ ''.join(i[:3]) for i in imgs_splited]
        return imgs
| UTF-8 | Python | false | false | 4,169 | py | 11 | readmanga.py | 9 | 0.519309 | 0.515231 | 0 | 115 | 35.226087 | 87 |
ar4s/ralph | 11,201,274,713,026 | 2eebd23f05808c9c6473318ad62a7a45142e5e9a | 72d542bc7fb86a47d29dd38f3040a5193bcbe080 | /src/ralph/middleware.py | 4d0d6227ded066907d2f43763114cfebc1d0d1e7 | [
"Apache-2.0"
]
| permissive | https://github.com/ar4s/ralph | 8f5da56887d9fb4882811868740eec6c1ee7367a | 8249cb79bd2c5d7dba25b07ec8abfa61eb5fab07 | refs/heads/develop | 2021-10-25T14:01:52.061444 | 2016-07-27T13:36:07 | 2016-07-27T13:36:07 | 19,566,764 | 0 | 1 | NOASSERTION | true | 2019-08-14T09:59:49 | 2014-05-08T08:48:11 | 2016-07-08T07:54:35 | 2018-12-28T19:04:32 | 34,237 | 0 | 1 | 1 | Python | false | false | from threading import current_thread
from django.conf import settings
from django.contrib.auth.models import User
from ralph.account.models import Region
_requests = {}
def get_actual_regions():
thread_name = current_thread().name
if thread_name not in _requests:
return Region.objects.filter(
name=settings.DEFAULT_REGION_NAME,
)
return _requests[thread_name]['regions']
class RegionMiddleware(object):
def process_request(self, request):
if hasattr(request, 'user'):
if request.user.is_anonymous():
try:
user = User.objects.get(
username=request.GET.get('username'),
api_key__key=request.GET.get('api_key')
)
except User.DoesNotExist:
user = None
else:
user = request.user
if user:
data = {
'user_id': user.id,
'regions': user.profile.get_regions(),
}
_requests[current_thread().name] = data
def process_response(self, request, response):
if hasattr(request, 'user') and not request.user.is_anonymous():
_requests.pop(current_thread().name, None)
return response
| UTF-8 | Python | false | false | 1,338 | py | 552 | middleware.py | 366 | 0.546338 | 0.546338 | 0 | 44 | 29.409091 | 72 |
Roboy/LSM_SpiNNaker_MyoArm | 11,587,821,776,943 | 1fc900e8a27749649806c166d5a3f7714d7fd5eb | a1dce04838e2a4aa85e8a98459ae384aa00911bc | /src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/utilities/report_functions/front_end_common_memory_map_on_chip_report.py | c5956168724e4947fbff6038c732901ad94931be | [
"BSD-3-Clause"
]
| permissive | https://github.com/Roboy/LSM_SpiNNaker_MyoArm | 8cb5b44c772bd9843989c64265dfcabd08905b4c | 04fa1eaf78778edea3ba3afa4c527d20c491718e | refs/heads/master | 2021-01-23T17:42:33.122281 | 2017-10-30T11:34:22 | 2017-10-30T11:34:22 | 102,772,213 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from data_specification import constants
from spinn_machine.utilities.progress_bar import ProgressBar
import logging
import os
import struct
logger = logging.getLogger(__name__)
# Sub-directory (under the report directory) all memory map files go into.
MEM_MAP_SUBDIR_NAME = "memory_map_reports"
class FrontEndCommonMemoryMapOnChipReport(object):
    """ Report on memory usage
    """

    def __call__(self, report_default_directory, dsg_targets, transceiver):
        """ Write one memory map report file per (x, y, p) target core.

        :param report_default_directory: base directory for report output
        :param dsg_targets: iterable of (x, y, p) cores that had a data\
            specification executed
        :param transceiver: transceiver used to read back on-chip memory
        :return: None
        """
        directory_name = os.path.join(
            report_default_directory, MEM_MAP_SUBDIR_NAME)
        if not os.path.exists(directory_name):
            os.makedirs(directory_name)

        progress_bar = ProgressBar(len(dsg_targets),
                                   "Writing memory map reports")
        for (x, y, p) in dsg_targets:
            file_name = os.path.join(
                directory_name,
                "memory_map_from_processor"
                "_{0:d}_{1:d}_{2:d}.txt".format(x, y, p))
            try:
                output = open(file_name, "w")
            except IOError:
                logger.error("Generate_placement_reports: Can't open file"
                             " {} for writing.".format(file_name))
                # Bug fix: previously execution fell through with
                # output == None and crashed on output.write() with an
                # AttributeError; now this core is skipped and the report
                # run continues.
                progress_bar.update()
                continue
            try:
                self._write_memory_map(output, x, y, p, transceiver)
            finally:
                # close() also flushes; the finally guarantees the handle is
                # released even if reading back on-chip memory fails.
                output.close()
            progress_bar.update()
        progress_bar.end()

    @staticmethod
    def _write_memory_map(output, x, y, p, transceiver):
        """ Read the region table of one core and write its report.

        The table's address is held in the core's user-1 register; the table
        itself is MAX_MEM_REGIONS fixed-size _MemoryChannelState records.
        """
        output.write("On chip data specification executor\n\n")
        report_data_address_pointer = transceiver.\
            get_user_1_register_address_from_core(x, y, p)
        report_data_address_encoded = buffer(transceiver.read_memory(
            x, y, report_data_address_pointer, 4))
        report_data_address = struct.unpack_from(
            "<I", report_data_address_encoded)[0]
        report_bytes = \
            _MemoryChannelState.STRUCT_SIZE * constants.MAX_MEM_REGIONS
        mem_map_report_data = buffer(transceiver.read_memory(
            x, y, report_data_address, report_bytes))
        offset = 0
        for i in xrange(constants.MAX_MEM_REGIONS):
            region = _MemoryChannelState.from_bytestring(
                mem_map_report_data, offset)
            offset += _MemoryChannelState.STRUCT_SIZE
            if region.start_address == 0:
                output.write("Region {0:d}: Unused\n\n".format(i))
                continue
            # A region flagged as unfilled reports zero bytes written.
            space_written = 0 if region.unfilled else region.written
            output.write(
                "Region {0:d}:\n\t"
                "start address: 0x{1:x}\n\t"
                "size: {2:d}\n\t"
                "unfilled: {3:s}\n\t"
                "write pointer: 0x{4:x}\n\t"
                "size currently written(based on the "
                "write pointer): {5:d}\n\n".format(
                    i, region.start_address, region.size,
                    region.unfilled_tf, region.write_pointer,
                    space_written))
class _MemoryChannelState(object):
    """Mirror of one on-chip memory-region record: four little-endian
    32-bit fields (start address, size, unfilled flag, write pointer)."""

    # 4 fields each of 4 bytes
    STRUCT_SIZE = 16

    def __init__(self, start_address, size, unfilled, write_pointer):
        self._start_address = start_address
        self._size = size
        self._unfilled = unfilled
        self._write_pointer = write_pointer

    @property
    def start_address(self):
        """Base address of the region."""
        return self._start_address

    @property
    def size(self):
        """Allocated size of the region in bytes."""
        return self._size

    @property
    def unfilled(self):
        """Raw unfilled flag as read from the chip (0 or non-zero)."""
        return self._unfilled

    @property
    def unfilled_tf(self):
        """The unfilled flag rendered as the string "True" or "False"."""
        return "True" if self._unfilled else "False"

    @property
    def write_pointer(self):
        """Current write pointer within the region."""
        return self._write_pointer

    @property
    def written(self):
        """Bytes written so far, derived from the write pointer."""
        return self._write_pointer - self._start_address

    @staticmethod
    def from_bytestring(data, offset=0):
        """Decode one 16-byte record from *data* starting at *offset*."""
        fields = struct.unpack_from("<4I", data, offset)
        return _MemoryChannelState(*fields)

    def bytestring(self):
        """Encode this record as a 16-byte little-endian bytearray."""
        return bytearray(struct.pack(
            "<4I", self._start_address, self._size, self._unfilled,
            self._write_pointer))
Square789/Demomgr | 13,915,694,049,779 | 07a0b6f008759db5f84b82ebf0f648d411472d52 | f95067e684894edb3148925c7b84ad803dee0aa0 | /demomgr/handle_events.py | ca29b7bf6994b983c374c47cf0c274d6e106bad9 | [
"BSD-3-Clause"
]
| permissive | https://github.com/Square789/Demomgr | 3a8904badebe6eb0706d64bb19aa11d117cd4a22 | 1b093f6e1284890d0164539aa4ca5b0a5a40f585 | refs/heads/master | 2022-12-22T04:19:41.552613 | 2022-12-13T21:26:09 | 2022-12-13T21:26:09 | 165,436,359 | 4 | 2 | MIT | false | 2022-03-14T00:44:13 | 2019-01-12T21:10:57 | 2021-10-31T09:16:03 | 2022-03-14T00:44:13 | 1,438 | 4 | 0 | 11 | Python | false | false | """
Classes designed to ease up the handling of a _events.txt file as written
by the source engine.
2022 update: This code is like 2 years old and could use a serious make-over.
"""
# Defaults shared by reader and writer, plus reader-/writer-specific ones;
# constructor arguments override these per instance.
_DEF = {"sep": ">\n"}
read_DEF = {"blocksz": 65536, "resethandle": True}
write_DEF = {"clearfile": False, "forceflush": False, "empty_ok": False}
class RawLogchunk():
    """
    Container for a single raw logchunk, carrying the attributes:
    content: Content read directly from the file. (str)
    is_last: Whether the chunk is the last one in the file. (bool)
    fromfile: Absolute path to file that the chunk was read from. (str)
    """
    __slots__ = ("content", "is_last", "fromfile")
    def __init__(self, content, is_last, fromfile):
        self.content = content
        self.is_last = is_last
        self.fromfile = fromfile
    def __str__(self):
        return self.content
    def __repr__(self):
        return "<Logchunk from file {}>".format(self.fromfile)
    def __bool__(self):
        # A chunk is truthy exactly when it carries any content.
        return bool(self.content)
class EventReader():
    """
    Class designed to read a Source engine demo event log file.
    handle: Must either be a file handle object or a string to a file.
        If a file handle, must be opened in r, w+, a+ mode and with
        utf-8 encoding. It will not be closed after destruction
        of the reader.
    sep: Seperator of individual logchunks. (Default '>\\n', str)
    resethandle: Will reset the file handle's position to 0 upon
        creation. (Default True, bool)
    blocksz: Blocksize to read files in. (Default 65536, int)
    May raise:
    OSError when handle creation fails.
    UnicodeError when a given handle is not opened in utf-8.
    UnicodeDecodeError when reading an event file with non-utf-8 data.
    """
    def __init__(self, handle, sep = None, resethandle = None, blocksz = None):
        # Track whether we opened the handle ourselves (and thus own it).
        self.isownhandle = False
        if isinstance(handle, str):
            self.isownhandle = True
            handle = open(handle, "r", encoding = "utf-8")
        else:
            if handle.encoding.lower() not in ("utf8", "utf-8"):
                raise UnicodeError("Handle must be opened in utf-8 encoding!")
        self.handle = handle
        # Merge defaults with any explicitly supplied options.
        self.cnf = _DEF.copy()
        self.cnf.update(read_DEF)
        for v, n in ((sep, "sep"), (resethandle, "resethandle"), (blocksz, "blocksz")):
            if v is not None:
                self.cnf[n] = v
        self.filename = self.handle.name
        if self.cnf["resethandle"]:
            self.handle.seek(0)
        # lastchunk carries a partially read chunk across block reads;
        # chunkbuffer holds fully parsed RawLogchunks awaiting retrieval.
        self.lastchunk = ""
        self.chunkbuffer = []
    def __enter__(self):
        return self
    def __iter__(self):
        return self
    def __exit__(self, *_):
        self.destroy()
    def __next__(self):
        # Iteration stops on the first empty / whitespace-only chunk.
        chk = self.getchunks(1)[0]
        if not chk or chk.content.isspace():
            raise StopIteration
        return chk
    def destroy(self):
        # NOTE(review): closes the handle even when it was passed in by the
        # caller (docstring promises otherwise) -- confirm intent.
        self.handle.close()
        del self
    def getchunks(self, toget = 1):
        """
        Gets specified amount of chunks from the file.
        Returns a list of RawLogchunks.
        toget: How many RawLogchunks the list should contain.
            (Default 1, int)
        Warning: The amount of returned chunks may be lower than
            the requested amount if the file has ended.
        """
        # Keep reading blocks until enough chunks are buffered, then hand
        # out the requested amount in file order.
        while len(self.chunkbuffer) < toget:
            self.__read()
        returnbfr = []
        for _ in range(toget):
            returnbfr.append(self.chunkbuffer.pop(0))
        return returnbfr
    def reset(self):
        """Resets the EventReader to the start of the file."""
        self.handle.seek(0)
        self.lastchunk = ""
        self.chunkbuffer = []
    def __read(self):
        """
        Internal method reading logchunks and adding them to
        `self.chunkbuffer`.
        """
        raw = ""
        rawread = ""
        logchunks = []
        rawread = self.handle.read(self.cnf["blocksz"])
        # Prepend any partial chunk left over from the previous block.
        raw = self.lastchunk + rawread
        logchunks = raw.split(self.cnf["sep"])
        # Heuristic: a file position within one block size means this was
        # the first read (TODO confirm correctness for multi-byte chars).
        if (self.handle.tell() - 1) <= self.cnf["blocksz"]: # This was the first read
            if logchunks and logchunks[0] == "":
                logchunks.pop(0)
            if len(logchunks) == 0:
                # Completely empty file: emit a single empty terminal chunk.
                self.chunkbuffer.append(RawLogchunk("", True, self.handle.name))
                return
            # Sometimes, the file starts with >, in which case the first logchunk may be empty.
            elif len(logchunks) == 1:
                if rawread == "":
                    self.lastchunk = ""
                else:
                    self.lastchunk = logchunks.pop(0) # Big logchunk
        else:
            # Last element may be cut off mid-chunk; carry it to next read.
            self.lastchunk = logchunks.pop(-1)
        self.chunkbuffer.extend(
            [RawLogchunk(i[:-1], not bool(rawread), self.handle.name) for i in logchunks]
        )
class EventWriter():
    """
    Writes logchunks to a Source engine demo event log file.

    handle: Either an open file object or anything `open` accepts
        (path string, bytes path, file descriptor).
        A passed-in file object must be opened in a+ mode with utf-8
        encoding and will not be closed when the writer is destroyed.
    sep: Seperator between individual logchunks. (str)
    clearfile: Truncate the file as soon as the writer is created.
        (Default False, bool)
    forceflush: Flush the file handle after every written logchunk.
        (Default False, bool)
    empty_ok: If False, writing an empty chunk raises a ValueError; if
        True, empty chunks are silently skipped. (Default False, bool)
    """
    def __init__(self, handle, sep = None, clearfile = None, forceflush = None, empty_ok = None):
        # Start from the defaults, then apply explicit overrides.
        self.cnf = _DEF.copy()
        self.cnf.update(write_DEF)
        overrides = {"sep": sep, "clearfile": clearfile,
                     "forceflush": forceflush, "empty_ok": empty_ok}
        for key, value in overrides.items():
            if value is not None:
                self.cnf[key] = value
        self.isownhandle = isinstance(handle, (str, bytes, int))
        if self.isownhandle:
            handle = open(handle, "a+", encoding = "utf-8")
        elif handle.encoding.lower() not in ("utf8", "utf-8"):
            raise UnicodeError("Handle must be opened in utf-8 encoding!")
        self.handle = handle
        self.filename = self.handle.name
        if self.handle.mode != "a+":
            raise ValueError("Handle must be openend in a+ format.")
        if self.cnf["clearfile"]:
            self.handle.seek(0)
            self.handle.truncate(0)
        # Position at the end of the file so writes append.
        self.handle.seek(0, 2)
    def __enter__(self):
        return self
    def __exit__(self, *_): # NOTE: maybe handle exceptions dunno
        self.destroy()
    def writechunk(self, in_chk):
        """Writes a string or RawLogchunk to the file following options specified."""
        if not isinstance(in_chk, (RawLogchunk, str)):
            raise ValueError(f"Expected RawLogchunk or str, not {type(in_chk).__name__}")
        if not in_chk:
            if not self.cnf["empty_ok"]:
                raise ValueError("Empty logchunks can not be written.")
            return
        # A separator precedes every chunk except the first one in the
        # file; a newline always terminates the chunk.
        if self.handle.tell() != 0:
            self.handle.write(self.cnf["sep"])
        self.handle.write(str(in_chk))
        self.handle.write("\n")
        if self.cnf["forceflush"]:
            self.handle.flush()
    def writechunks(self, in_chks):
        """Accepts a list of Strings or Logchunks and writes them to file."""
        for chunk in in_chks:
            self.writechunk(chunk)
    def destroy(self):
        """Closes handle if it was created inside of the EventWriter."""
        if self.isownhandle:
            self.handle.close()
        del self
| UTF-8 | Python | false | false | 6,821 | py | 46 | handle_events.py | 42 | 0.674241 | 0.667351 | 0 | 237 | 27.780591 | 94 |
jinseoo/DataSciPy | 12,506,944,808,239 | 10cbab9c82e3c82b1c01b6ea67874ce85d918086 | 6040ec2771a81654ac41f33ce5c4aa7e66d4e5d9 | /src/파이썬코드(py)/Ch04/lab_4_7.py | 2dcd1fcad951682bcb7c0c9b932ff6d2792e9f2d | []
| no_license | https://github.com/jinseoo/DataSciPy | a3462785ae094530141e66ead8de9e6519fbf193 | de6127c0741f8d0cfc989e17ba3a5a65004e5d9c | refs/heads/master | 2023-06-25T19:03:22.086126 | 2021-07-27T09:01:41 | 2021-07-27T09:01:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# LAB 4-7 로그인 처리하기, 109쪽
#
id = "ilovepython"
s = input("아이디를 입력하시오: ")
if s == id:
print("환영합니다.")
else:
print("아이디를 찾을 수 없습니다.") | UTF-8 | Python | false | false | 282 | py | 248 | lab_4_7.py | 214 | 0.597701 | 0.545977 | 0 | 10 | 16.5 | 33 |
lukaszoller/hyperOptimize | 3,229,815,452,299 | 1fa78d5fdc401b23b46b69074bfb8606a161bff1 | 186ad7f744dc6166fe2dd826bfcce720381097c7 | /src/hyperOptimizeApp/testing/testDelete.py | 93dd66d8e1a8d4e3af7cd907c8b07708495247c1 | []
| no_license | https://github.com/lukaszoller/hyperOptimize | a2f3c8267104ad6e0845c3636652e6b821ea9bb0 | 99f11ea22032e1f6b1ba847bbccf275558b798ff | refs/heads/master | 2020-08-01T21:44:45.542590 | 2020-01-17T09:18:34 | 2020-01-17T09:18:34 | 211,126,166 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
print(10 ** -float(2.59)) | UTF-8 | Python | false | false | 46 | py | 46 | testDelete.py | 33 | 0.652174 | 0.543478 | 0 | 4 | 10.75 | 25 |
VSakuya/MeaBot | 9,990,093,974,066 | 1155748771b00360b36d5bddf11dc81032079962 | d5372221ba83a957df480af33d4d5e7af51550f9 | /asset/plugins/duel/data_source.py | 934e4d13a5af40903a6e9353e37b2c280a6e63ac | []
| no_license | https://github.com/VSakuya/MeaBot | d09bd3c6811e68741a1aadcb15a84005fd6b4345 | e66aff2cf252bf4ec3e5784b19b42e24ec6bce0b | refs/heads/master | 2022-02-22T01:26:41.048037 | 2019-08-29T04:08:33 | 2019-08-29T04:08:33 | 196,145,025 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import os
import re
import platform
from config import global_var
from nonebot import logger
def check_file():
    """Ensure asset/data/duel.json exists, creating an empty JSON object
    file if it is missing."""
    path = os.path.join(os.curdir, 'asset', 'data', 'duel.json')
    if not os.path.exists(path):
        with open(path, 'w', encoding='utf-8') as fp:
            json.dump({}, fp, ensure_ascii=False)
    logger.info('duel file checked')
async def get_duel_data() -> dict:
    """Load the duel statistics from asset/data/duel.json.

    Returns:
        The parsed JSON object (group id -> user id -> bullet count -> stats).
    Raises:
        OSError: if the file does not exist (see check_file).
    """
    duel_dir = os.path.join(os.curdir, 'asset', 'data', 'duel.json')
    with open(duel_dir, 'r', encoding='utf-8') as data_json:
        # Return directly; the old trailing "return None" was unreachable.
        return json.load(data_json)
async def write_duel_data(in_dict: dict) -> bool:
    """Serialize *in_dict* to asset/data/duel.json.

    Returns:
        True once the file has been written. (I/O errors propagate as
        OSError; the old trailing "return False" was unreachable.)
    """
    duel_dir = os.path.join(os.curdir, 'asset', 'data', 'duel.json')
    with open(duel_dir, 'w', encoding='utf-8') as data_json:
        json.dump(in_dict, data_json, ensure_ascii=False)
    return True
async def add_single_duel(user_id: int, group_id: int, bullets_use: int, is_dead: bool):
    """Record one duel outcome for a user, creating any missing dict levels
    on the fly, then persist the updated statistics."""
    duel_data = await get_duel_data()
    # Statistics are nested group -> user -> bullet count -> outcome tally.
    group_data = duel_data.setdefault(str(group_id), {})
    user_data = group_data.setdefault(str(user_id), {})
    bullet_data = user_data.setdefault(str(bullets_use), {})
    bullet_data.setdefault('death', 0)
    bullet_data.setdefault('alive', 0)
    outcome_key = 'death' if is_dead else 'alive'
    bullet_data[outcome_key] = bullet_data[outcome_key] + 1
    result = await write_duel_data(duel_data)
    return result
async def get_user_duel_data(group_id: int, user_id: int, bullets_use: int = 0):
    """Fetch a user's duel statistics for one bullet count, or -- when
    bullets_use is 0 -- the totals aggregated over all bullet counts.
    Returns None when the group/user/bullet count has no recorded data."""
    duel_data = await get_duel_data()
    user_data = duel_data.get(str(group_id), {}).get(str(user_id))
    if user_data is None:
        return None
    if bullets_use != 0:
        return user_data.get(str(bullets_use))
    # Aggregate: sum each outcome key ('death'/'alive') across all bullet
    # counts the user has ever played with.
    totals = {}
    for bullet_stats in user_data.values():
        for stat_key, count in bullet_stats.items():
            totals[stat_key] = totals.get(stat_key, 0) + count
    return totals or None
| UTF-8 | Python | false | false | 2,680 | py | 22 | data_source.py | 21 | 0.589925 | 0.586194 | 0 | 76 | 34.263158 | 88 |
Mongoal/version_20.04.07 | 4,243,427,717,214 | 7fff309ab3d26c9f07423ed1874267d2b9a336fa | 9d57c4453b5707da993758edce95c651ee2b65a7 | /data_loader/data_generator.py | e019bf2dbce410e23060264a5683c24d95697d48 | []
| no_license | https://github.com/Mongoal/version_20.04.07 | 7ad15221363038219c731ad0e86ff7141b521a3e | 7e26fab9ba2c3438108cc5e6f0002e136d6d630c | refs/heads/master | 2022-12-19T14:07:08.136542 | 2020-09-13T08:40:25 | 2020-09-13T08:40:25 | 279,265,565 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from data_loader.h5data_reader import H5DataReader
# Fetch BUFFER_SIZE batches worth of samples per read to reduce the number
# of (comparatively slow) HDF5 I/O calls.
BUFFER_SIZE = 32
class DataGenerator:
    """Yields (data, label) mini-batches from an HDF5 dataset.

    To cut down on I/O, every read from the underlying H5DataReader fetches
    BUFFER_SIZE * batch_size samples at once, which are then sliced into
    individual batches. The three next_*_batch_generator methods previously
    duplicated this buffering logic; it now lives in _buffered_batches.
    """

    def __init__(self, config):
        """
        Args:
            config: experiment config providing h5_data_path, h5_data_key,
                h5_label_key, h5_shuffle_seed and optionally
                h5_condition_args.
        """
        self.config = config
        # load data here
        self.h5_reader = H5DataReader(config.h5_data_path, mode='r', data_key=config.h5_data_key, label_key=config.h5_label_key, seed=config.h5_shuffle_seed)
        if config.get("h5_condition_args"):
            self.h5_reader.set_condition_idx(*config.h5_condition_args)
        # Generators are created lazily and cached so repeated get_* calls
        # keep iterating the same underlying stream.
        self.train_batch_generator = None
        self.test_batch_generator = None
        self.batch_generator = None

    def _buffered_batches(self, fetch, batch_size):
        """Endless generator of (x, y) batches on top of a bulk fetcher.

        Args:
            fetch: callable(sample_count) -> (data, labels), e.g. one of
                the H5DataReader bulk-read methods.
            batch_size: number of samples per yielded batch.
        Yields:
            (batch_x, batch_y) slices; the final slice of each buffer may
            be shorter than batch_size.
        """
        while True:
            buffer_x, buffer_y = fetch(batch_size * BUFFER_SIZE)
            buffer_length = len(buffer_x)
            i = 0
            while i < buffer_length:
                start = i
                i += batch_size
                yield buffer_x[start:i], buffer_y[start:i]

    def get_train_batch_generator(self, batch_size):
        """Cached generator over training batches.

        Use like: batch_x, batch_y = next(gen.get_train_batch_generator(bs))
        Args:
            batch_size: batch size (fixed by the first call).
        Returns:
            A generator of (batch_x, batch_y).
        """
        if self.train_batch_generator is None:
            self.train_batch_generator = self.next_train_batch_generator(batch_size)
        return self.train_batch_generator

    def get_test_batch_generator(self, batch_size):
        """Cached generator over test batches (see get_train_batch_generator)."""
        if self.test_batch_generator is None:
            self.test_batch_generator = self.next_test_batch_generator(batch_size)
        return self.test_batch_generator

    def get_batch_generator(self, batch_size):
        """Cached generator over shuffled batches from the whole data set."""
        if self.batch_generator is None:
            self.batch_generator = self.next_batch_generator(batch_size)
        return self.batch_generator

    def next_batch_generator(self, batch_size):
        """Fresh generator over shuffled batches from the whole data set."""
        return self._buffered_batches(self.h5_reader.get_shuffle_data, batch_size)

    def next_train_batch_generator(self, batch_size):
        """Fresh generator over batches from the training split."""
        return self._buffered_batches(self.h5_reader.get_train_batch, batch_size)

    def next_test_batch_generator(self, batch_size):
        """Fresh generator over batches from the test split."""
        return self._buffered_batches(self.h5_reader.get_test_batch, batch_size)

    def get_epoch_size(self, batch_size):
        """Number of batches required to cover the whole data set once."""
        return (self.h5_reader.length - 1) // batch_size + 1
| UTF-8 | Python | false | false | 3,882 | py | 44 | data_generator.py | 30 | 0.553112 | 0.54721 | 0 | 132 | 27.242424 | 157 |
dkopecky75/vocabulaire | 2,233,383,019,791 | 9b43ef694cc4799c8d30abdf2775cf679eec9c66 | 58e01df2fdf2e53f49a9308f277a7a83a449e8cd | /run.py | e088b61bbee5c46d64427b503455f39decda807d | []
| no_license | https://github.com/dkopecky75/vocabulaire | 35010e6666256e59908d03d73c4a2b3a64ccf498 | 8e76bcefdb7852503d234910291b3ae9d46022c2 | refs/heads/master | 2022-08-15T13:19:48.613980 | 2022-06-24T07:24:29 | 2022-06-24T07:24:29 | 204,907,893 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This module is the main entry point into the Flask-based vocabulary application.
Start calling 'env FLASK_APP=run.py /apps/prod/python/python3/bin/flask run'
"""
# Authors: Dieter Kopecky <dieter.kopecky@boehringer-ingelheim.com>
from app import app
app.run(debug=True) | UTF-8 | Python | false | false | 275 | py | 6 | run.py | 6 | 0.774545 | 0.770909 | 0 | 9 | 29.666667 | 80 |
cinxdy/Python_practice | 6,528,350,324,483 | 80805f6daf2ffc6fba8edc9d7cf5d615a339ba41 | 60530f3e1d22fcb3b51be0b9f482a912c8203bb0 | /Python_Workbook/J08.py | ce6a4adfbf174fd49518dbc92f3c8f72a4f812fa | []
| no_license | https://github.com/cinxdy/Python_practice | 1afb2aca1c92d16d98459407ae02ca2ed7f7832c | 8a2642b51c6ad73840dae964b1a55cbb53f7b9f7 | refs/heads/master | 2022-05-09T00:10:15.849881 | 2022-04-03T09:38:53 | 2022-04-03T09:38:53 | 180,339,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def Ackermann(i,j):
if i==0 and j>=0 :
return j+1
elif i>0 and j==0 :
return Ackermann(i-1,1)
else : return Ackermann(i-1,Ackermann(i,j-1))
# Print a small table of Ackermann values for 0 <= i, j <= 3.
for row in range(4):
    for col in range(4):
        value = Ackermann(row, col)
        print("Ackermann({0:d},{1:d}) = {2:d}".format(row, col, value))
kilsenp/person-multi-task-dataset | 1,846,835,944,637 | 5e26eff49701041068a1c385a0c420717930c054 | 3146428ae92aeeaf4ded579cad78a00c3fd0d803 | /tests/model_test.py | de64b3bf69880212663922d6b53a1be09b669621 | [
"MIT"
]
| permissive | https://github.com/kilsenp/person-multi-task-dataset | 047d9bb2b70d8a2cfdbaa6a4960b62b7554bd7a9 | 2f186cafa3db2c77d8c6c4309b2cadc13d4f92ab | refs/heads/master | 2021-07-13T22:36:06.606283 | 2020-03-01T14:58:01 | 2020-03-01T14:58:01 | 239,165,538 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
from builders import model_builder
from models import get_all_models
import sys
import torch
# Minimal model configurations used by the parametrized smoke test below.
# Each ``<name>_cfg`` dict is looked up by get_cfg() from the model name.
attribute_cfg = {
    "attributes": {
        "gender": 2,
        "age": 2,
        "backpack": 2
    },
    "dropout": True,
    "name": "attribute"
}
resnet_cfg = {
    "name": "resnet",
    "stride": 1
}
resnet_groupnorm_cfg = {
    "name": "resnet_groupnorm",
    "stride": 1,
    "ncpg": 16,
    "pretrained": False
}
# baseline/pose configs embed the backbone configs above by reference.
baseline_cfg = {
    "backbone": resnet_cfg,
    "pooling": "max"
}
classification_cfg = {
    "num_classes": 1,
    "merging_block": {
        "name": "single",
        "endpoint": "softmax"
    }
}
classification_triplet_cfg = {
    "num_classes": 1
}
conv4_multi_task_cfg = {
    "tasks": {
        "reid": {"pooling": "max"},
        "pose": {"num_joints": 16}
    }
}
conv4_2_head_batch_cfg = {
    "stride": 1,
    "pretrained": False
}
conv4_2_head_group_cfg = {
    "stride": 1,
    "ncpg": 16,
    "pretrained": False
}
mgn_cfg = {
    "num_branches": [1],
    "num_classes": 1,
    "dim": 1
}
multi_branch_classification_cfg = {
    "num_branches": 1,
    "num_classes": 1,
    "local_dim": 1,
    "shared": True
}
multi_task_network_cfg = {
    "num_classes": 1,
    "attributes": {
        "gender": 2,
        "age": 2,
        "backpack": 2
    }
}
pose_cfg = {
    "num_joints": 2,
    "backbone": resnet_groupnorm_cfg
}
pose_reid_cfg = {
    "num_joints": 2,
    "backbone": resnet_groupnorm_cfg,
    "split": False
}
pose_reid_semi_cfg = {
    "num_joints": 2,
    "backbone": resnet_groupnorm_cfg,
    "single_head": True
}
trinet_cfg = {
    "dim": 1
}
def get_cfg(name):
    """Return the module-level ``<name>_cfg`` dict for a model name.

    The lookup is case-insensitive. Raises ValueError (chained to the
    original AttributeError) when no such config exists, instead of
    swallowing unrelated errors with a broad ``except Exception``.
    """
    module = sys.modules[__name__]
    name = name.lower()
    try:
        return getattr(module, '{}_cfg'.format(name))
    except AttributeError as e:
        raise ValueError("Model config {} not found".format(name)) from e
def build_model(name):
    """Build the model named ``name`` from its module-level config.

    Works on a shallow copy of the config so repeated (parametrized) calls
    do not mutate the shared module-level ``*_cfg`` dicts by injecting
    'name'/'pretrained' keys into them.
    """
    cfg = dict(get_cfg(name))
    cfg['name'] = name
    cfg['pretrained'] = False
    return model_builder.build(cfg)
@pytest.mark.parametrize("model", get_all_models())
def test_model(model):
    """Smoke test: build each registered model, run one forward pass and
    check every declared endpoint has the advertised output dimensions."""
    model = build_model(model)
    model.eval()
    # Single RGB image of the person re-id input size (256x128).
    test_input = torch.rand(1, 3, 256, 128)
    with torch.no_grad():
        endpoints = model(test_input, model.endpoints)
    print(model.create_endpoints())
    print(model.dimensions)
    # forward one image, compare to declared dimensions (batch dim excluded)
    for key, dim in model.dimensions.items():
        assert endpoints[key].shape[1:] == dim, key
| UTF-8 | Python | false | false | 2,461 | py | 112 | model_test.py | 101 | 0.565624 | 0.548151 | 0 | 145 | 15.972414 | 66 |
DlabrecquePE/PE-work | 18,064,632,480,699 | 5881d35ad75a7b22f23126ffa9d2885a958d2f5c | 66a3362637b864eb3954c01902a2e4f37cd02e21 | /src/Problem 31.py | 43e2960f5c12dc80d4b073af64a95385887894ff | []
| no_license | https://github.com/DlabrecquePE/PE-work | 127e0af1ff2b0bb6dedc15ecc25dcbc7d6c8eb09 | 6509419c0f3de9f7875e257b149cae6f27565108 | refs/heads/main | 2023-02-04T15:02:49.625256 | 2020-12-20T07:51:07 | 2020-12-20T07:51:07 | 322,053,827 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Coin sums
# Problem 31
# In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
#
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
# It is possible to make £2 in the following way:
#
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
# How many different ways can £2 be made using any number of coins?
import sys
from functools import lru_cache
# coins() recursion depth can approach the target amount (one unit coin per
# level), so lift the default recursion limit (1000) before computing n = 200.
sys.setrecursionlimit(2000)
@lru_cache(maxsize=2**16)
def coins(n, denoms):
    """Count the ways to make amount ``n`` using the coin values ``denoms``.

    ``denoms`` must be a tuple so results are memoizable. Each count either
    excludes the largest denomination entirely, or uses it at least once.
    """
    if n < 0 or not denoms:
        return 0
    if n == 0:
        return 1
    largest, remaining = denoms[-1], denoms[:-1]
    return coins(n, remaining) + coins(n - largest, denoms)
# Project Euler 31: count the ways to make 2 pounds from UK coins.
N = 200
denominations = (1, 2, 5, 10, 20, 50, 100, 200)
print(coins(N, denominations))
| UTF-8 | Python | false | false | 780 | py | 104 | Problem 31.py | 81 | 0.634115 | 0.546875 | 0 | 29 | 25.482759 | 113 |
drczuckerman/ML-TicTacToe | 4,870,492,951,820 | beb298807e302fde69d4395678667ffe659d13a8 | e0fb87868dab9005ebc2d3d4965b66ca8f0cf74c | /test/test_td_symmetric_learning_player.py | b513d29e246a096c5f2f5814d6a51036530b982c | []
| no_license | https://github.com/drczuckerman/ML-TicTacToe | 3a329dd7c54307ec1b258070771d9eca003ce052 | 0fc15691514bea6814411470757de78069705a15 | refs/heads/master | 2021-08-19T16:59:51.962424 | 2017-11-27T01:27:46 | 2017-11-27T01:27:46 | 106,683,399 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from mock import patch
from test_td_learning_player import TestTDLearningPlayer
from td_symmetric_learning_player import TDSymmetricLearningPlayer
from board import Board
from board_test_utils import get_board_state_tuple, assert_get_move_is, set_board
from mock_random import MockRandom
class TestTDSymmetricLearningPlayer(TestTDLearningPlayer):
    """Tests for TDSymmetricLearningPlayer.

    The symmetric player maps every board state to one canonical state, so a
    position and any of its 8 symmetries (rotations by 90/180/270 degrees,
    horizontal/vertical reflections, and both diagonal reflections) share a
    single learned value.
    """
    def setUp(self):
        super().setUp()
        self.player = TDSymmetricLearningPlayer()
        self.file_name_prefix = "TDSymmetricLearningPlayer"
        self.player.set_board(self.board)
        self.player.enable_learning()
    def assert_get_value_and_state_symmetric_is(self, value, pieces, symmetric_pieces, winner, piece):
        """Assert that ``symmetric_pieces`` resolves to the canonical
        ``pieces`` state with the expected value, and that only the canonical
        state is stored in the value table."""
        self.player.set_piece(piece)
        state = get_board_state_tuple(pieces)
        symmetric_state = get_board_state_tuple(symmetric_pieces)
        current_value, new_state = self.player._get_value_and_state(symmetric_state, winner)
        self.assertAlmostEqual(value, current_value)
        self.assertEqual(state, new_state)
        self.assertIn(state, self.player.values)
        if symmetric_state != state:
            self.assertNotIn(symmetric_state, self.player.values)
    def assert_values_after_reward_symmetric_are(self, values, pieces_list, symmetric_pieces_list, winner):
        """Play through ``symmetric_pieces_list`` and assert the canonical
        states hold the expected values after the reward is applied."""
        self.player.set_piece(Board.X)
        values_dict = {}
        for pieces, symmetric_pieces, value in zip(pieces_list, symmetric_pieces_list, values):
            set_board(self.board, symmetric_pieces)
            self.player.store_state()
            values_dict[get_board_state_tuple(pieces)] = value
        self.player.set_reward(winner)
        self.assertEqual(sorted(values_dict.keys()), sorted(self.player.values.keys()))
        # NOTE(review): this loop compares each stored value with itself;
        # presumably ``values_dict[key]`` was intended instead of ``value``
        # — confirm against the non-symmetric test base class.
        for key, value in self.player.values.items():
            self.assertAlmostEqual(self.player.values[key], value)
    def test_get_value_and_state_returns_current_value_if_symmetric_state_known(self):
        """Every symmetry of a known state must resolve to its stored value."""
        state1 = get_board_state_tuple("XO-|X--|---")
        state2 = get_board_state_tuple("XO-|X--|O--")
        self.player.values[state1] = 0.56
        self.player.values[state2] = 0.45
        # Original
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "XO-|X--|---", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "XO-|X--|O--", None, Board.O)
        # Rotated by 90 degrees
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "---|O--|XX-", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "---|O--|XXO", None, Board.O)
        # Rotated by 180 degrees
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "---|--X|-OX", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "--O|--X|-OX", None, Board.O)
        # Rotated by 270 degrees
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "-XX|--O|---", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "OXX|--O|---", None, Board.O)
        # Reflected horizontally
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "---|X--|XO-", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "O--|X--|XO-", None, Board.O)
        # Reflected vertically
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "-OX|--X|---", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "-OX|--X|--O", None, Board.O)
        # Reflected on left diagonal
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "XX-|O--|---", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "XXO|O--|---", None, Board.O)
        # Reflected on right diagonal
        self.assert_get_value_and_state_symmetric_is(0.56, "XO-|X--|---", "---|--O|-XX", None, Board.X)
        self.assert_get_value_and_state_symmetric_is(0.45, "XO-|X--|O--", "---|--O|OXX", None, Board.O)
    @patch('td_learning_player.random.choice')
    @patch('td_learning_player.random.random')
    def test_get_move_chooses_best_available_move_if_random_gte_epsilon(
            self, random_mock, choice_mock):
        """Greedy move selection must consider symmetric values when scoring."""
        random_mock.return_value = 0.1
        choice_mock.side_effect = MockRandom(0).choice
        self.player.values[get_board_state_tuple("---|-XO|---")] = 0.501 # Original
        # Reflected horizontally
        # Force other symmetric choices to be of lower value
        self.player.values[get_board_state_tuple("-O-|-X-|---")] = 0.499 # Rotated by 90 degrees
        # Reflected on right diagonal
        self.player.values[get_board_state_tuple("---|OX-|---")] = 0.499 # Rotated by 180 degrees
        # Reflected vertically
        self.player.values[get_board_state_tuple("---|-X-|-O-")] = 0.499 # Rotated by 270 degrees
        # Reflected on left diagonal
        assert_get_move_is(self, self.player, self.board, 5, Board.O, "---|-X-|---")
        choice_mock.assert_called_once_with([5])
    @patch('td_learning_player.random.choice')
    @patch('td_learning_player.random.random')
    def test_get_move_chooses_random_best_available_move_if_random_gte_epsilon_and_multiple_bests(
            self, random_mock, choice_mock):
        """When several moves tie (directly or via symmetry) one is picked at random."""
        random_mock.return_value = 0.1
        choice_mock.side_effect = MockRandom(1).choice
        self.player.values[get_board_state_tuple("X--|-XO|---")] = 0.501 # position 0
        self.player.values[get_board_state_tuple("--X|-XO|---")] = 0.501 # position 2
        # Symmetries for X--|-XO|---:
        #   -O-|-X-|X-- (Rotated by 90 degrees)
        #   ---|OX-|--X (Rotated by 180 degrees)
        #   --X|-X-|-O- (Rotated by 270 degrees)
        #   ---|-XO|X-- (Reflected horizontally) - position 6
        #   --X|OX-|--- (Reflected vertically)
        #   X--|-X-|-O- (Reflected on left diagonal)
        #   -O-|-X-|--X (Reflected on right diagonal)
        #
        # Symmetries for --X|-XO|---:
        #   XO-|-X-|--- (Rotated by 90 degrees)
        #   ---|OX-|X-- (Rotated by 180 degrees)
        #   ---|-X-|-OX (Rotated by 270 degrees)
        #   ---|-XO|--X (Reflected horizontally) - position 8
        #   X--|OX-|--- (Reflected vertically)
        #   ---|-X-|XO- (Reflected on left diagonal)
        #   -OX|-X-|--- (Reflected on right diagonal)
        assert_get_move_is(self, self.player, self.board, 2, Board.X, "---|-XO|---")
        choice_mock.assert_called_once_with([0, 2, 6, 8])
    def test_set_reward_updates_values_for_each_symmetric_state(self):
        """Rewarding a game played through any symmetry must update the same
        canonical states; each pass below plays one symmetric variant."""
        self.player.set_params(alpha=0.4)
        # Original
        self.assert_values_after_reward_symmetric_are(
            [0.5128, 0.532, 0.58, 0.7, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            Board.X)
        # Rotated by 90 degrees
        self.assert_values_after_reward_symmetric_are(
            [0.54352, 0.5896, 0.676, 0.82, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["---|O--|X--",
             "---|O--|X--",
             "---|O--|XX-",
             "---|OO-|XX-",
             "---|OO-|XXX"],
            Board.X)
        # Rotated by 180 degrees
        self.assert_values_after_reward_symmetric_are(
            [0.5896, 0.65872, 0.7624, 0.892, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["---|---|--X",
             "---|---|-OX",
             "---|--X|-OX",
             "---|-OX|-OX",
             "--X|-OX|-OX"],
            Board.X)
        # Rotated by 270 degrees
        self.assert_values_after_reward_symmetric_are(
            [0.644896, 0.72784, 0.83152, 0.9352, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["--X|---|---",
             "--X|--O|---",
             "-XX|--O|---",
             "-XX|-OO|---",
             "XXX|-OO|---"],
            Board.X)
        # Reflected horizontally
        self.assert_values_after_reward_symmetric_are(
            [0.7029568, 0.790048, 0.88336, 0.96112, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["---|---|X--",
             "---|---|XO-",
             "---|X--|XO-",
             "---|XO-|XO-",
             "X--|XO-|XO-"],
            Board.X)
        # Reflected vertically
        self.assert_values_after_reward_symmetric_are(
            [0.7586952, 0.8423027, 0.9206848, 0.976672, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["--X|---|---",
             "-OX|---|---",
             "-OX|--X|---",
             "-OX|-OX|---",
             "-OX|-OX|--X"],
            Board.X)
        # Reflected on left diagonal
        self.assert_values_after_reward_symmetric_are(
            [0.8088597, 0.8841065, 0.9468122, 0.9860032, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["X--|---|---",
             "X--|O--|---",
             "XX-|O--|---",
             "XX-|OO-|---",
             "XXX|OO-|---"],
            Board.X)
        # Reflected on right diagonal
        self.assert_values_after_reward_symmetric_are(
            [0.8518579, 0.9163551, 0.9647281, 0.9916019, 1.0],
            ["X--|---|---",
             "XO-|---|---",
             "XO-|X--|---",
             "XO-|XO-|---",
             "XO-|XO-|X--"],
            ["---|---|--X",
             "---|--O|--X",
             "---|--O|-XX",
             "---|-OO|-XX",
             "---|-OO|XXX"],
            Board.X)
| UTF-8 | Python | false | false | 10,692 | py | 50 | test_td_symmetric_learning_player.py | 38 | 0.467826 | 0.434624 | 0 | 249 | 41.939759 | 107 |
adap/flower | 17,463,337,041,951 | 005c1fd8fa60986eef0a78bb6a4d4d4ca3e8915b | 1bc67a91d85a7106106ca31307ef9ee93f1d1a20 | /baselines/fedprox/fedprox/utils.py | 2fa3b64966bd3c1b37a29371000f499283473bac | [
"Apache-2.0"
]
| permissive | https://github.com/adap/flower | 4915d143c674eb675504d585e1e90ed06833812f | 55be690535e5f3feb33c888c3e4a586b7bdbf489 | refs/heads/main | 2023-08-17T01:18:12.168723 | 2023-08-16T17:17:48 | 2023-08-16T17:17:48 | 241,095,326 | 2,999 | 658 | Apache-2.0 | false | 2023-09-14T15:43:22 | 2020-02-17T11:51:29 | 2023-09-14T12:42:53 | 2023-09-14T15:43:21 | 150,424 | 3,019 | 611 | 352 | Python | false | false | """Contains utility functions for CNN FL on MNIST."""
import pickle
from pathlib import Path
from secrets import token_hex
from typing import Dict, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
from flwr.server.history import History
def plot_metric_from_history(
    hist: History,
    save_plot_path: Path,
    suffix: Optional[str] = "",
) -> None:
    """Plot centralized loss and accuracy from a Flower server History.

    Parameters
    ----------
    hist : History
        Object containing evaluation for all rounds.
    save_plot_path : Path
        Folder to save the plot to.
    suffix: Optional[str]
        Optional string to add at the end of the filename for the plot.
    """
    metric_type = "centralized"
    metric_dict = (
        hist.metrics_centralized
        if metric_type == "centralized"
        else hist.metrics_distributed
    )
    # Only the accuracy values are plotted; both subplots use the loss
    # rounds on the x axis, so the accuracy round numbers are not needed
    # (the previous `rounds` local was unused).
    _, values = zip(*metric_dict["accuracy"])

    # let's extract centralised loss (main metric reported in FedProx paper)
    rounds_loss, values_loss = zip(*hist.losses_centralized)

    # The figure handle is never used directly; pyplot's implicit current
    # figure is saved and closed below.
    _, axs = plt.subplots(nrows=2, ncols=1, sharex="row")
    axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss))
    axs[1].plot(np.asarray(rounds_loss), np.asarray(values))

    axs[0].set_ylabel("Loss")
    axs[1].set_ylabel("Accuracy")

    plt.xlabel("Rounds")

    plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png"))
    plt.close()
def save_results_as_pickle(
    history: "History",
    file_path: Union[str, Path],
    extra_results: Optional[Dict] = None,
    default_filename: Optional[str] = "results.pkl",
) -> None:
    """Saves results from simulation to pickle.

    Parameters
    ----------
    history: History
        History returned by start_simulation.
    file_path: Union[str, Path]
        Path to file to create and store both history and extra_results.
        If path is a directory, the default_filename will be used.
        If the path doesn't exist, it will be created. If the file exists, a
        randomly generated suffix will be added to the file name. This
        is done to avoid overwriting results.
    extra_results : Optional[Dict]
        A dictionary containing additional results you would like
        to be saved to disk. Default: None (treated as an empty dict).
    default_filename: Optional[str]
        File used by default if file_path points to a directory instead
        of a file. Default: "results.pkl"
    """
    # A mutable default ({}) was previously used for `extra_results`; use the
    # None sentinel instead so state is never shared across calls.
    if extra_results is None:
        extra_results = {}

    path = Path(file_path)

    # ensure path exists
    path.mkdir(exist_ok=True, parents=True)

    def _add_random_suffix(path_: Path):
        """Return path_ with a random hex suffix so the existing file is kept."""
        print(f"File `{path_}` exists! ")
        suffix = token_hex(4)
        print(f"New results to be saved with suffix: {suffix}")
        return path_.parent / (path_.stem + "_" + suffix + ".pkl")

    def _complete_path_with_default_name(path_: Path):
        """Append the default file name to a directory path."""
        print("Using default filename")
        return path_ / default_filename

    if path.is_dir():
        path = _complete_path_with_default_name(path)

    if path.is_file():
        # file exists already
        path = _add_random_suffix(path)

    print(f"Results will be saved into: {path}")

    data = {"history": history, **extra_results}

    # save results to pickle
    with open(str(path), "wb") as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
| UTF-8 | Python | false | false | 3,608 | py | 708 | utils.py | 455 | 0.646896 | 0.644956 | 0 | 112 | 31.214286 | 82 |
varunranganathan/TextSummarization | 11,081,015,642,221 | a5619b3db8553023bf240275bb64766a837ce7b7 | 7fb523f47a7f594e6368d5a29ecdda180c406004 | /Proper Nouns.py | 4824ca8fa2534c0a6ac27d77c5afe76f2b330453 | []
| no_license | https://github.com/varunranganathan/TextSummarization | dd3ea4839d14d2376766ba14d6ada206211b63af | d31d7c773c5fbaff202dae2e229fc71dbe2facac | refs/heads/master | 2021-01-22T03:17:45.689271 | 2017-02-08T15:26:40 | 2017-02-08T15:26:40 | 81,111,503 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import nltk.data
from nltk.tag import pos_tag
import re
from nltk.tokenize import word_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
f = open("text3.txt", 'r')
g = open("ans.txt",'w')
data = f.read()
i = 1
for p in data.split('\n\n'):
x = pos_tag(p.split())
for s in tokenizer.tokenize(p.strip()):
propernouns = [word for word,pos in tagged_sent if pos == 'NNP']
for a in propernouns:
g.write(propernouns[a])
| UTF-8 | Python | false | false | 447 | py | 4 | Proper Nouns.py | 4 | 0.680089 | 0.675615 | 0 | 15 | 28.466667 | 65 |
naparuba/opsbro | 2,422,361,569,094 | 7020172443e5216d9429507b96b8bbebf3e6a7f9 | abaa806550f6e6e7bcdf71b9ec23e09a85fe14fd | /opsbro/log.py | 390b4f5263f708e64a1b9b7b9ddc9ff07478dec4 | [
"MIT"
]
| permissive | https://github.com/naparuba/opsbro | 02809ddfe22964cd5983c60c1325c965e8b02adf | 98618a002cd47250d21e7b877a24448fc95fec80 | refs/heads/master | 2023-04-16T08:29:31.143781 | 2019-05-15T12:56:11 | 2019-05-15T12:56:11 | 31,333,676 | 34 | 7 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import datetime
import logging
import json
import codecs
import shutil
from glob import glob
from threading import Lock as ThreadLock
from multiprocessing.sharedctypes import Value
from ctypes import c_int
# Python 2/3 compatibility aliases: under Python 3 the py2-only names map to str.
PY3 = sys.version_info >= (3,)
if PY3:
    unicode = str
    basestring = str
from .misc.colorama import init as init_colorama
# Lazy load to avoid recursive import (filled in by cprint() on first use)
string_decode = None
bytes_to_unicode = None
# Keep 7 days of logs (7 archives + the current file)
LOG_ROTATION_KEEP = 7 + 1
# For testing purposes, we prefer to have a lower value here
if 'FORCE_LOG_ROTATION_PERIOD' in os.environ:
    LOG_ROTATION_KEEP = int(os.environ['FORCE_LOG_ROTATION_PERIOD'])
# Log part used when the caller does not specify one (-> daemon.log)
DEFAULT_LOG_PART = 'daemon'
def is_tty():
    """Return True when stdout looks like a terminal able to render colors.

    On Windows the classic CMD console is considered too limited, so a
    terminal is only assumed when the ANSICON environment variable is set.
    """
    # TODO: what about windows? how to have beautiful & Windows?
    # on windows, we don't know how to have cool output
    if os.name == 'nt':
        # We must consider the classic CMD as a no tty, as it's just too limited
        if not os.environ.get('ANSICON', ''):
            return False
    # Look if we are in a tty or not
    isatty = getattr(sys.stdout, 'isatty', None)
    if isatty is not None:
        return isatty()
    return False
# Define cprint()/sprintf() once at import time: colored terminal versions
# when stdout is a tty, plain UTF-8-safe fallbacks when running as a daemon.
if is_tty():
    # Try to load the terminal color. Won't work under python 2.4
    try:
        from .misc.termcolor import cprint, sprintf
        # init the colorama hook, for windows print
        # will do nothing for other than windows
        init_colorama()
    except (SyntaxError, ImportError) as exp:
        # Outch can't import a cprint, do a simple print
        def cprint(s, color='', on_color='', end='\n'):
            print(s, end=end)
        # Also overwrite sprintf
        def sprintf(s, color='', end=''):
            return s
# Ok it's a daemon mode, if so, just print
else:
    # Protect sys.stdout write for utf8 outputs
    import codecs
    stdout_utf8 = codecs.getwriter("utf-8")(sys.stdout)
    # stdout_utf8.errors = 'ignore'
    def cprint(s, color='', on_color='', end='\n'):
        # Daemon-mode fallback: ignores colors and writes UTF-8 safely.
        global string_decode, bytes_to_unicode
        if os.name == 'nt' and hasattr(sys.stdout, 'is_null_write'): # maybe we are in a windows service, so skip printing
            return
        # Resolve the lazily-loaded helpers on first use (avoids an import cycle).
        if string_decode is None:
            from .util import string_decode
            string_decode = string_decode
        if bytes_to_unicode is None:
            from .util import bytes_to_unicode
            bytes_to_unicode = bytes_to_unicode
        if not isinstance(s, basestring):
            s = str(s)
        # Python 2 and 3: good luck for unicode in a terminal.
        # It's a nightmare to manage all of this, if you have a common code
        # that allow to run WITHOUT a terminal, I take it :)
        if PY3:
            s = string_decode(s)
            raw_bytes, consumed = stdout_utf8.encode(s, 'strict')
            # We have 2 cases:
            # * (default) sys.stdout is a real tty we did hook
            # * (on test case by nose) was changed by a io.Stdout that do not have .buffer
            end_line = b'\n'
            if hasattr(sys.stdout, 'buffer'):
                write_into = sys.stdout.buffer
            else:
                write_into = sys.stdout
                raw_bytes = bytes_to_unicode(raw_bytes) # ioString do not like bytes
                end_line = '\n'
            if end == '':
                write_into.write(raw_bytes)
            else:
                write_into.write(raw_bytes)
                write_into.write(end_line)
        else: # PY2
            if end == '':
                stdout_utf8.write(s)
            else:
                stdout_utf8.write(s)
                stdout_utf8.write('\n')
    def sprintf(s, color='', end=''):
        # No colors in daemon mode: return the string unchanged.
        return s
def get_unicode_string(s):
    """Coerce *s* to a unicode text object.

    Under Python 2 a byte string is decoded as UTF-8 (undecodable bytes are
    dropped); everything else goes through the `unicode` constructor, which
    is `str` under Python 3.
    """
    if not PY3 and isinstance(s, str):
        return unicode(s, 'utf8', errors='ignore')
    return unicode(s)
loggers = {}
class Logger(object):
    """Process-wide logger.

    Formats entries, prints them (colored when possible), appends them to a
    per-part ``<part>.log`` file under ``data_dir``, rotates those files
    daily and keeps the last ``LOG_ROTATION_KEEP`` days of history. A small
    in-memory stack of the most recent WARNING/ERROR lines is kept for the
    HTTP API.
    """
    def __init__(self):
        self.data_dir = ''
        self.name = ''
        self.logs = {}
        self.registered_parts = {}
        self.level = logging.INFO
        self.is_force_level = False # is the currently level is a force one or not (force by API or by CLI args for example)
        self.linkify_methods()
        # We will keep last 20 errors
        self.last_errors_stack_size = 20
        self.last_errors_stack = {'DEBUG': [], 'WARNING': [], 'INFO': [], 'ERROR': []}
        self.last_date_print_time = 0
        self.last_date_print_value = ''
        self.last_rotation_day = Value(c_int, 0) # shared epoch of the last time we did rotate, round by 86400
        # Log will be protected by a lock (for rotating and such things)
        # WARNING the lock is link to a pid, if used on a sub process it can fail because
        # the master process can have aquire() it and so will never unset it in your new process
        self.log_lock = None
        self.current_lock_pid = os.getpid()
    # Get (NOT aquire) current lock, but beware: if we did change process, recreate it
    def _get_lock(self):
        """Return the thread lock, recreating it after a fork (pid change)."""
        cur_pid = os.getpid()
        if self.log_lock is None or self.current_lock_pid != cur_pid:
            self.log_lock = ThreadLock()
            self.current_lock_pid = os.getpid()
        return self.log_lock
    # A code module register it's
    def register_part(self, pname):
        """Register a named log part (enabled by default)."""
        # By default we show it if the global level is ok with this
        self.registered_parts[pname] = {'enabled': True}
    def linkify_methods(self):
        """Bind self.debug/info/warning/error to the real handlers or to
        do_null() depending on the current level."""
        methods = {'DEBUG': self.do_debug, 'WARNING': self.do_warning, 'INFO': self.do_info, 'ERROR': self.do_error}
        for (s, m) in methods.items():
            level = getattr(logging, s)
            # If the level is enough, link it
            if level >= self.level:
                setattr(self, s.lower(), m)
            else:
                setattr(self, s.lower(), self.do_null)
    def _get_log_file_path(self, fname):
        """Return the full path of a log file inside data_dir."""
        return os.path.join(self.data_dir, fname)
    def _get_log_open(self, fname):
        """Open (append, UTF-8) the given log file and return the handle."""
        full_path = self._get_log_file_path(fname)
        fd = codecs.open(full_path, 'ab', encoding="utf-8")
        return fd
    def load(self, data_dir, name):
        """Set the logger name and data directory, creating the directory
        and the default daemon.log file if needed."""
        self.name = name
        self.data_dir = data_dir
        # We can start with a void data dir
        if not os.path.exists(self.data_dir):
            os.mkdir(self.data_dir)
        self._get_log_file_and_rotate_it_if_need(part=DEFAULT_LOG_PART) # create default part as daemon.log
    # If a level is set to force, a not foce setting is not taken
    # if a level we set by force before, and this call is not a force one, skip this one
    def setLevel(self, s, force=False):
        """Set the logging level by name; forced levels override later
        non-forced calls."""
        if not force and self.is_force_level:
            return
        if force:
            self.is_force_level = True
        try:
            level = getattr(logging, s.upper())
            if not isinstance(level, int):
                raise AttributeError
            self.level = level
        except AttributeError:
            self.error('Invalid logging level configuration %s' % s)
            return
        self.linkify_methods()
    def get_errors(self):
        """Return the in-memory stacks of recent log lines, keyed by level."""
        return self.last_errors_stack
    def __get_time_display(self):
        """Return the current time formatted for display, cached per second."""
        now = int(time.time())
        # Cache hit or not?
        if now == self.last_date_print_time:
            return self.last_date_print_value
        # save it
        # NOTE: a small thread race is possible here, but the worst case is a
        # one-second display delay, which is acceptable.
        self.last_date_print_value = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')
        return self.last_date_print_value
    @staticmethod
    def __get_day_string_for_log(epoch):
        """Return the YYYY-MM-DD day string for an epoch timestamp."""
        return datetime.datetime.fromtimestamp(epoch).strftime('%Y-%m-%d')
    # We will find all .log file and rotate them to the yesterday day
    # NOTe: if not running for several days, this will make past log from yesterday, I know
    # and it's ok
    def _check_log_rotation(self):
        """Once per day: close open logs, rename *.log to *.log.<yesterday>
        and delete archives older than LOG_ROTATION_KEEP days."""
        now = int(time.time())
        current_day_nb, current_day_offset = divmod(now, 86400)
        if current_day_nb == self.last_rotation_day.value:
            return
        # Maybe we are just starting, if so, do not rotate, no luck for old files and
        # was not running at midnight
        if self.last_rotation_day.value == 0:
            self.last_rotation_day.value = current_day_nb
            return
        self.last_rotation_day.value = current_day_nb # warn ourselve but aso other sub process
        # As we will rotate them, we need to close all files
        # note: clean all logs entries, will be reopen when need
        for (part, f) in self.logs.items():
            f.close()
        self.logs.clear()
        # ok need to rotate
        in_yesterday = (current_day_nb * 86400) - 1
        yesterday_string = self.__get_day_string_for_log(in_yesterday)
        # At which time the file is too old to be kept?
        too_old_limit = (current_day_nb * 86400) - (LOG_ROTATION_KEEP * 86400) # today minus - days
        # Current logs: move to old day
        current_logs = glob(os.path.join(self.data_dir, '*.log'))
        for file_path in current_logs:
            # Maybe the file is too old, if so, delete it
            self._do_rotate_one_log(file_path, yesterday_string)
        old_log_files = glob(os.path.join(self.data_dir, '*.log.*'))
        for file_path in old_log_files:
            # Maybe the file is too old, if so, delete it
            date = os.stat(file_path).st_mtime
            if date < too_old_limit:
                try:
                    os.unlink(file_path)
                except OSError: # oups, cannot remove, but we are the log, cannot log this...
                    pass
    @staticmethod
    def _do_rotate_one_log(base_full_path, yesterday_string):
        """Rename one log file to <name>.<yesterday> and refresh its mtime."""
        if os.path.exists(base_full_path):
            dest_path = base_full_path + '.' + yesterday_string
            shutil.move(base_full_path, dest_path)
            # note that under docker if you change the container date, the file will be the host
            # time, so will fail to detect file rotation so we force our time for this file
            now = int(time.time())
            os.utime(dest_path, (now, now)) # note: if using times = None, will set real time in docker
    def _get_log_file_and_rotate_it_if_need(self, part):
        """Return the open handle for a part's log file, triggering the
        daily rotation check and reopening the file when needed."""
        self._check_log_rotation()
        # classic part log
        f = self.logs.get(part, None)
        if f is None: # was rotated or maybe rotated
            log_name = '%s.log' % part
            f = self._get_log_open(log_name)
            self.logs[part] = f
        return self.logs[part]
    def log(self, *args, **kwargs):
        """Format and emit one log line: print it, keep it in the error
        stacks, write it to the part file and forward it to an optional
        named-pipe listener. Recognized kwargs: part, level, color,
        do_print, stack, listener."""
        # We must protect logs against thread access, and even sub-process ones
        with self._get_lock():
            part = kwargs.get('part', DEFAULT_LOG_PART)
            s_part = '' if not part else '[%s]' % part.upper()
            d_display = self.__get_time_display()
            s = '[%s][%s][%s] %s: %s' % (d_display, kwargs.get('level', 'UNSET '), self.name, s_part, u' '.join([get_unicode_string(s) for s in args]))
            # Sometime we want a log output, but not in the stdout
            if kwargs.get('do_print', True):
                if 'color' in kwargs:
                    cprint(s, color=kwargs['color'])
                else:
                    print(s)
            stack = kwargs.get('stack', False)
            # Not a perf problems as it's just for errors and a limited size
            if stack:
                self.last_errors_stack[stack].append(s)
                # And keep only the last 20 ones for example
                self.last_errors_stack[stack] = self.last_errors_stack[stack][-self.last_errors_stack_size:]
            # if no data_dir, we cannot save anything...
            if self.data_dir == '':
                return
            s = s + '\n'
            # Now get the log file and write into it
            # NOTE: if need, will rotate all files
            f = self._get_log_file_and_rotate_it_if_need(part)
            f.write(s)
            f.flush()
            # Now update the log listener if exis
            listener = kwargs.get('listener', '')
            if listener and hasattr(os, 'O_NONBLOCK') and f is not None: # no named pipe on windows
                try:
                    fd = os.open(listener, os.O_WRONLY | os.O_NONBLOCK)
                    os.write(fd, s)
                    os.close(fd)
                except Exception as exp: # maybe the path did just disapear
                    s = "ERROR LISTERNER %s" % exp
                    f.write(s)
    def do_debug(self, *args, **kwargs):
        self.log(*args, level='DEBUG', color='magenta', **kwargs)
    def do_info(self, *args, **kwargs):
        self.log(*args, level='INFO', color='blue', **kwargs)
    def do_warning(self, *args, **kwargs):
        self.log(*args, level='WARNING', color='yellow', stack='WARNING', **kwargs)
    def do_error(self, *args, **kwargs):
        self.log(*args, level='ERROR', color='red', stack='ERROR', **kwargs)
    def do_null(self, *args, **kwargs):
        # Bound in place of disabled levels by linkify_methods().
        pass
    def export_http(self):
        """Register the /log/parts/ HTTP endpoint on the embedded daemon."""
        from .httpdaemon import http_export, response
        @http_export('/log/parts/')
        def list_parts():
            response.content_type = 'application/json'
            # NOTE(review): under Python 3 ``loggers.keys()`` is a view and not
            # JSON serializable; ``list(loggers.keys())`` looks intended — confirm.
            return json.dumps(loggers.keys())
core_logger = Logger()
class PartLogger(object):
    """Logger facade bound to one named part.

    Delegates to ``core_logger`` and, when the named pipe
    ``/tmp/opsbro-follow-<part>`` exists (created by the CLI "follow" tool),
    also forwards every line to it with a fixed-width level string.
    """
    def __init__(self, part):
        self.part = part
        self.listener_path = '/tmp/opsbro-follow-%s' % part
    def debug(self, *args, **kwargs):
        kwargs['part'] = kwargs.get('part', self.part)
        if os.path.exists(self.listener_path):
            kwargs['listener'] = self.listener_path
            kwargs['level'] = 'DEBUG '
            core_logger.log(*args, color='magenta', **kwargs)
            return
        core_logger.debug(*args, **kwargs)
    def info(self, *args, **kwargs):
        kwargs['part'] = kwargs.get('part', self.part)
        if os.path.exists(self.listener_path):
            kwargs['listener'] = self.listener_path
            kwargs['level'] = 'INFO '
            core_logger.log(*args, color='blue', **kwargs)
            return
        core_logger.info(*args, **kwargs)
    def warning(self, *args, **kwargs):
        kwargs['part'] = kwargs.get('part', self.part)
        if os.path.exists(self.listener_path):
            kwargs['listener'] = self.listener_path
            kwargs['level'] = 'WARNING'
            core_logger.log(*args, color='yellow', **kwargs)
            return
        core_logger.warning(*args, **kwargs)
    def error(self, *args, **kwargs):
        kwargs['part'] = kwargs.get('part', self.part)
        if os.path.exists(self.listener_path):
            kwargs['listener'] = self.listener_path
            kwargs['level'] = 'ERROR '
            core_logger.log(*args, color='red', **kwargs)
            return
        core_logger.error(*args, **kwargs)
    def log(self, *args, **kwargs):
        # Raw log call: no level/color forced, listener forwarding only.
        kwargs['part'] = kwargs.get('part', self.part)
        if os.path.exists(self.listener_path):
            kwargs['listener'] = self.listener_path
            core_logger.log(*args, **kwargs)
            return
        core_logger.log(*args, **kwargs)
    def setLevel(self, s, force=False):
        # Level is global: delegates straight to the shared core logger.
        core_logger.setLevel(s, force=force)
    def load(self, data_dir, name):
        # Configuration is global as well: delegates to the core logger.
        core_logger.load(data_dir, name)
# Create logger for a specific part if not already exists
class LoggerFactory(object):
    """Hands out one shared PartLogger per part name."""
    @classmethod
    def create_logger(cls, part):
        """Return the PartLogger registered for ``part``, creating and
        caching it in the module-level ``loggers`` registry on first use."""
        if part not in loggers:
            loggers[part] = PartLogger(part)
        return loggers[part]
logger = LoggerFactory.create_logger(DEFAULT_LOG_PART)
| UTF-8 | Python | false | false | 16,314 | py | 523 | log.py | 276 | 0.560071 | 0.556332 | 0 | 466 | 34.008584 | 152 |
chplushsieh/carvana-challenge | 10,462,540,359,739 | b9d69e7c9ec157031b8893868b2eb7b15e7167c4 | a7211f3f0ef6cbb96a796e502062656681dcdf9b | /util/ensemble.py | 88f49d880456096509ccd9241063af58d2bc4fec | [
"MIT"
]
| permissive | https://github.com/chplushsieh/carvana-challenge | fdc3e78966a37f95f3e60a179a511705cc0da55f | cba536657714df7c1c33150b92e3e152195b68db | refs/heads/master | 2021-01-01T19:40:08.442482 | 2017-10-05T17:53:29 | 2017-10-05T17:53:29 | 98,639,822 | 24 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import csv
import numpy as np
import util.const as const
import util.exp as exp
def create_file_if_not_exist(file_path):
    """Create an empty file at ``file_path`` unless a regular file exists."""
    if os.path.isfile(file_path):
        return
    # Append mode guarantees an existing file would never be truncated.
    with open(file_path, 'a'):
        pass
def create_models_ensembled(pred_dirs, ensemble_dir):
    """Create ./output/<ensemble_dir>/models_ensembled.txt by merging the
    (model, test-time augmentation) records of every prediction directory."""
    for source_dir in pred_dirs:
        model_names, aug_names = get_models_ensembled(source_dir)
        for model_name, aug_name in zip(model_names, aug_names):
            mark_model_ensembled(ensemble_dir, model_name, aug_name)
def get_models_ensembled(ensemble_dir):
'''
return ensembled models along with their test time augmentations by reading ./output/<ensemble_dir>/models_ensembled.txt
'''
model_names = []
test_time_aug_names = []
ensembled_models_path = os.path.join(const.OUTPUT_DIR, ensemble_dir, 'models_ensembled.txt')
create_file_if_not_exist(ensembled_models_path)
# read file content
with open(ensembled_models_path, newline='', ) as f:
reader = csv.reader(f)
for row in reader:
model_names.append(row[0])
test_time_aug_names.append(row[1])
return model_names, test_time_aug_names
def mark_model_ensembled(ensemble_dir, exp_name, test_time_aug_name):
'''
add newly ensembled model as a new line to ./output/<ensemble_dir>/models_ensembled.txt
'''
ensemble_dir_path = os.path.join(const.OUTPUT_DIR, ensemble_dir)
exp.create_dir_if_not_exist(ensemble_dir_path)
ensembled_models_path = os.path.join(ensemble_dir_path, 'models_ensembled.txt')
create_file_if_not_exist(ensembled_models_path)
# open file in 'append' mode
with open(ensembled_models_path, 'a', newline='') as f:
f.write(exp_name + ',' + test_time_aug_name + '\n') # insert as the last line
return
def get_ensemble_weights(ensemble_dirs):
'''
return ensembling weightes by reading ./output/<ensemble_dir>/models_ensembled.txt
'''
total_models = 0
weights = np.zeros(len(ensemble_dirs))
for i, ensemble_dir in enumerate(ensemble_dirs):
ensembled_model_names, _ = get_models_ensembled(ensemble_dir)
num_models_used = len(ensembled_model_names)
total_models += num_models_used
weights[i] = num_models_used
weights = np.divide(weights, total_models)
return weights
| UTF-8 | Python | false | false | 2,548 | py | 35 | ensemble.py | 30 | 0.663658 | 0.66248 | 0 | 84 | 29.333333 | 124 |
johanrueda/ProyectoCnyt-Tercer-tercio | 6,631,429,533,922 | b5c209f98b5a285211822efee8542378a1b626a0 | 145ce0a7dcd871e3f321b988c3c89ab084deec53 | /tercerTCnyt/libreria_vectores_matrices.py | 3e750576a2534b59975eb0ae4043f03463e8a575 | []
| no_license | https://github.com/johanrueda/ProyectoCnyt-Tercer-tercio | 7d2e33d002b07cc45b2a18abb486996bd8ba4b83 | c4a7d58a5fc113435a8bf58d792088d27eb11db8 | refs/heads/master | 2022-06-29T17:06:22.665599 | 2020-05-14T02:11:51 | 2020-05-14T02:11:51 | 263,791,450 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import libreria as c
def sumaVectores(v1,v2):
"""
Se ingresa cada vector, cada componente del vector es una tupla
que contiene la parte real y la parte imaginaria, retorna la suma
de los vectores complejos
"""
total=[]
if (len(v1)==len(v2)):
for i in range(len(v1)):
total.append(c.suma(v1[i][0],v2[i][0]))
return(total)
else:
return ("No son compatibles")
def restaVectores(v1,v2):
"""
Se ingresa cada vector, cada componente del vector es una tupla
que contiene la parte real y la parte imaginaria, retorna la resta
de los vectores complejos
"""
total=[]
if (len(v1)==len(v2)):
for i in range(len(v1)):
total.append(c.resta(v1[i][0],v2[i][0]))
return(total)
else:
return ("No son compatibles")
def inversoAditivo(v1):
"""
"""
total=[]
for i in range(len(v1)):
total.append(c.producto(v1[i][0],[-1,0]))
return total
def escalarVector(v1,e):
"""
Se ingresa cada vector, cada componente del vector es una tupla
que contiene la parte real y la parte imaginaria, retorna
la multiplicacion del vector por un escalar complejo
"""
total=[]
for i in range(len(v1)):
total.append(c.producto(v1[i][0],e))
return total
def sumaMatrices(m1,m2):
"""
Ingresa 2 matrices de tamaño MxN, retorna la suma de las matrices
"""
total=[]
if (len(m1)==len(m2)):
for i in range(len(m1)):
for j in range(len(m1[0])):
total.append(c.suma(m1[i][j],m2[i][j]))
return total
else:
return ("No sea marica,debe ser del mismo tamaño")
def inversoAditivoM(m1):
"""
Ingresa una matriz MxN, retorna el inverso aditivo de la matriz
"""
total=[]
for i in range(len(m1)):
for j in range(len(m1[0])):
total.append(c.producto(m1[i][j],[-1,0]))
return total
def escalarMatriz(m1,e):
""""
Ingresa una matriz MxN y un escalar complejo, retorna la multiplicacion de la
matriz por un escalar complejo
"""
total=[]
for i in range(len(m1)):
for j in range(len(m1[0])):
total.append(c.producto(m1[i][j],e))
return total
def transpuesta(m1):
"""
Ingresa una matriz MxN, y retorna la transpuesta de la matriz.
"""
total=[[None for i in range(len(m1))]for i in range(len(m1[0]))]
for i in range(len(m1)):
for j in range(len(m1[0])):
total[j][i]=m1[i][j]
return total
def conjugadoM(m1):
"""
Ingresa una matriz MxN, retorna el conjugado de la matriz.
"""
total=[]
fila=[]
for i in range(len(m1)):
for j in range(len(m1[0])):
fila.append(c.conjugado(m1[i][j]))
total.append(fila)
fila=[]
return total
def daga(m1):
"""
Ingresa una matriz MxN, retorna la adjunta de la matriz.
"""
return transpuesta(conjugadoM(m1))
def productoM(m1,m2):
"""
Se ingresa cada matriz, cada componente de la matriz es una tupla
que contiene la parte real y la parte imaginaria, retorna la multiplicacion
de las matrices complejas
"""
filas,filas2=len(m1),len(m2)
columnas,columnas2=len(m1[0]),len(m2[0])
if columnas == filas2:
total=[[[0,0] for columnas in range(columnas2)]for filas in range(filas)]
for i in range(filas):
for j in range(columnas2):
for k in range(len(m2)):
total[i][j]=c.suma(total[i][j],c.producto(m1[i][k],m2[k][j]))
return total
else:
return "No son compatibles"
def generaridentidad(n):
"""
Ingresa n, como el tamaño de la matriz, y retorna la matriz identidad de tamaño
n.
"""
matriz = []
for i in range(n):
l = []
for j in range(n):
l.append([1,0])
matriz.append(l)
return matriz
def accion(m1,m2):
"""
Ingresa una matriz compleja de tamaño MxN y un vector complejo, y retorna la
multiplicacion de la matriz por el vector
"""
return productoM(m1,m2)
def productoInternoV(v1,v2):
"""
Ingresa 2 vectores complejos, retorna el producto interno de los vectores
"""
d= daga(v1)
return productoM(d,v2)
def normaVector(vector):
"""
Ingresa un vector complejo y retorna la norma del vector
"""
return (productoInternoV(vector,vector))**0.5
def distanciaEntreVectores(vector1,vector2):
"""
Se ingresan 2 vectores complejos , retorna la distancia entre estos
"""
if len(vector1) != len(vector2):
return 'Los vectores no tienen la misma longitud, su producto interno no esta definido'
return c.modulo(restaVectores(vector1,vector2))
def esUnitaria(matriz):
"""
Ingresa una matriz cuadrada MxN, Retorna verdadero si la matriz es unitaria
"""
if len(matriz) != len(matriz[0]):
return 'La matriz no es cuadrada'
else:
total= productoM(matriz,daga(matriz))
ident= identidadM(len(matriz))
if total == ident:
return True
return False
def esHermitiana(matriz):
"""
Ingresa una matriz cuadrada MxN, Retorna verdadero si la matriz es hermitiana
(igual a su propia traspuesta conjugada)
"""
if len(matriz) != len(matriz[0]):
return 'La matriz no es cuadrada'
return matriz == daga(matriz)
def productoTensor(matriz1,matriz2):
"""
Ingresa una matriz cuadrada MxN,retorna el producto tensor entre estos
"""
aux = []
subLista = []
conta = len(matriz2)
for i in matriz1:
valorB = 0
valorA = 0
while valorA < conta:
for num1 in i:
for num2 in matriz2[valorB]:
subLista.append(c.producto(num1,num2))
aux.append(subLista)
subLista = []
valorA +=1
valorB += 1
return aux
| UTF-8 | Python | false | false | 6,215 | py | 4 | libreria_vectores_matrices.py | 2 | 0.567955 | 0.548953 | 0 | 213 | 27.14554 | 95 |
dbw9580/sigal | 996,432,420,418 | a162cde8f7bc9d1d5e74ea0bbc34049fb840bc1d | 6d88062a119da66f345b9373b00e6f42fbb87ba6 | /tests/test_plugins.py | 1e464c2fb055e7687964a0962f39716bd7e839e2 | [
"MIT"
]
| permissive | https://github.com/dbw9580/sigal | 1835aa4f53eda0242288b2101c563dd2638eb828 | 4cc7f5f4d2d2090f3335076e4c133003da7bafb7 | refs/heads/master | 2023-07-20T04:58:03.221526 | 2023-06-03T15:57:09 | 2023-06-03T15:57:09 | 251,307,690 | 0 | 0 | MIT | true | 2020-03-30T13:07:54 | 2020-03-30T13:07:54 | 2020-03-17T05:56:23 | 2020-03-16T03:07:18 | 5,936 | 0 | 0 | 0 | null | false | false | import os
from sigal import init_plugins
from sigal.gallery import Gallery
CURRENT_DIR = os.path.dirname(__file__)
def test_plugins(settings, tmpdir, disconnect_signals):
settings["destination"] = str(tmpdir)
if "sigal.plugins.nomedia" not in settings["plugins"]:
settings["plugins"] += ["sigal.plugins.nomedia"]
if "sigal.plugins.media_page" not in settings["plugins"]:
settings["plugins"] += ["sigal.plugins.media_page"]
init_plugins(settings)
gal = Gallery(settings)
gal.build()
out_html = os.path.join(
settings["destination"], "dir2", "KeckObservatory20071020.jpg.html"
)
assert os.path.isfile(out_html)
for path, dirs, files in os.walk(os.path.join(str(tmpdir), "nomedia")):
assert "ignore" not in path
for file in files:
assert "ignore" not in file
def test_nonmedia_files(settings, tmpdir, disconnect_signals):
settings["destination"] = str(tmpdir)
settings["plugins"] += ["sigal.plugins.nonmedia_files"]
settings["nonmedia_files_options"] = {"thumb_bg_color": "red"}
init_plugins(settings)
gal = Gallery(settings)
gal.build()
outfile = os.path.join(settings["destination"], "nonmedia_files", "dummy.pdf")
assert os.path.isfile(outfile)
outthumb = os.path.join(
settings["destination"], "nonmedia_files", "thumbnails", "dummy.tn.jpg"
)
assert os.path.isfile(outthumb)
def test_titleregexp(settings, tmpdir, disconnect_signals):
if "sigal.plugins.titleregexp" not in settings["plugins"]:
settings["plugins"] += ["sigal.plugins.titleregexp"]
init_plugins(settings)
gal = Gallery(settings)
gal.build()
assert gal.albums.get("dir1").albums[1].title == "titleregexp 02"
| UTF-8 | Python | false | false | 1,763 | py | 1 | test_plugins.py | 1 | 0.664776 | 0.657402 | 0 | 59 | 28.881356 | 82 |
honmaple/maple-file | 16,149,077,068,392 | af53f6506c4be6b00d570bfd154f952a6948b7a8 | dd42c63443163e7c80edd0d62885bd07614b8ec2 | /storage/__init__.py | 1ea36758f8ba4d7589c00156497f1cf607468432 | []
| permissive | https://github.com/honmaple/maple-file | 2b61dab68a1d231aa4b82641e46ec5b3f9c8ece0 | bdcbc6c569ee9dedb26cfd6b3387c7f22c559fc6 | refs/heads/master | 2022-11-25T17:15:43.949591 | 2019-07-12T12:08:37 | 2019-07-12T12:08:37 | 84,726,235 | 41 | 15 | BSD-3-Clause | false | 2022-11-22T03:22:44 | 2017-03-12T13:07:11 | 2022-06-14T08:23:51 | 2022-11-22T03:22:41 | 44 | 37 | 14 | 2 | Python | false | false | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2019 jianglin
# File Name: __init__.py
# Author: jianglin
# Email: mail@honmaple.com
# Created: 2017-03-12 20:16:21 (CST)
# Last Update: Wednesday 2019-01-09 14:12:04 (CST)
# By:
# Description:
# **************************************************************************
from flask import Flask
from storage import auth, server, admin, extension, router
def create_app(config):
app = Flask(__name__)
app.config.from_object(config)
extension.init_app(app)
auth.init_app(app)
server.init_app(app)
admin.init_app(app)
router.init_app(app)
return app
| UTF-8 | Python | false | false | 728 | py | 16 | __init__.py | 14 | 0.519945 | 0.474553 | 0 | 25 | 28.08 | 76 |
raffivar/NetworkPy | 5,832,565,610,006 | 103f3870df4a04a1d8010225b692d5b2ec4a7be9 | 221b7d486674de2c521acf5d3513426bd69c7c3a | /unit_3/data_loss/server_udp.py | 214d784c9eb20a9a4496576dc7283b997159df53 | []
| no_license | https://github.com/raffivar/NetworkPy | 08e47395db39388008aea7bb9cb09c37bb91d0d6 | 304732b98d2d114082563df1482966b084f1fabe | refs/heads/main | 2023-08-20T05:29:42.463334 | 2021-10-12T12:12:00 | 2021-10-12T12:12:00 | 402,143,751 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import socket
SERIAL_NUMBER_FIELD_SIZE = 4
MAX_SERIAL_NUM = 10000
SERVER_IP = "0.0.0.0"
PORT = 8821
MAX_MSG_SIZE = 1024
def special_sendto(socket_object, response, client_address):
fail = random.randint(1, 3)
if not (fail == 1):
socket_object.sendto(response.encode(), client_address)
else:
print("Oops")
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.bind((SERVER_IP, PORT))
request_serial_number = 0
while True:
(client_message, client_address) = server_socket.recvfrom(MAX_MSG_SIZE)
data = client_message.decode()
if data.lower() == "exit":
break
response = "echo '{}'".format(data)
special_sendto(server_socket, response, client_address)
serial_number_field = str(request_serial_number).zfill(SERIAL_NUMBER_FIELD_SIZE)
# Here is where you would enter code checking if the request already existed
request_serial_number += 1
if request_serial_number == MAX_SERIAL_NUM:
request_serial_number = 0
server_socket.close()
| UTF-8 | Python | false | false | 1,053 | py | 14 | server_udp.py | 12 | 0.688509 | 0.665717 | 0 | 37 | 27.459459 | 84 |
pichouk/bikes-data | 15,702,400,481,487 | 8811deb04305952dab0b7a5f2ac6b8fe5bb2ef34 | eae6bf56896b39bce0ca28161d60156ffeb2475a | /jcdecaux/__init__.py | a220dbf2226dc2d9f6816f9535e4393752da298b | []
| no_license | https://github.com/pichouk/bikes-data | afc45bbe85ca0e039a49dae7f1a60da94a6ba2a5 | 41a4929d3ce59afba15fc2c02fea3f322f3c5006 | refs/heads/master | 2020-04-03T04:02:48.872573 | 2019-04-22T17:50:50 | 2019-04-22T17:50:50 | 155,001,408 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
"""Driver for JCDecaux API."""
from .driver import JCDecauxDriver
| UTF-8 | Python | false | false | 83 | py | 7 | __init__.py | 4 | 0.722892 | 0.710843 | 0 | 5 | 15.6 | 34 |
karthikpappu/pyc_source | 18,202,071,422,978 | 39651401b0f5dfa72af4ce79b780a9b4133dadc0 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/sniorfy-0.2.0-py2.7/stpserver.py | bbc4c1ae2df3e39454da73251c6682a3f329b4e1 | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.macosx-10.4-x86_64/egg/sniorfy/stpserver.py
# Compiled at: 2012-06-29 04:41:12
import socket
from sniorfy.ioloop.netutil import TCPServer
class STPServer(TCPServer):
def __init__(self, request_callback, io_loop=None, application=None, **kwargs):
self.request_callback = request_callback
self.application = application
TCPServer.__init__(self, io_loop=io_loop, **kwargs)
def handle_stream(self, stream, address):
STPConnection(stream, address, self.request_callback, self.application)
class STPConnection(object):
def __init__(self, stream, address, request_callback, application):
self.stream = stream
self.application = application
if self.stream.socket.family not in (socket.AF_INET, socket.AF_INET6):
address = ('0.0.0.0', 0)
self.address = address
self.request_callback = request_callback
self._request = STPRequest(self)
self._request_finished = False
self.read_arg()
def read_arg(self):
self.stream.read_until('\r\n', self._on_arglen)
def _on_arglen(self, data):
if data == '\r\n':
self.request_callback(self._request)
self._request = STPRequest(self)
self.read_arg()
else:
try:
arglen = int(data[:-2])
except Exception as e:
raise e
self.stream.read_bytes(arglen, self._on_arg)
def _on_arg(self, data):
self._request.add_arg(data)
self.stream.read_until('\r\n', self._on_strip_arg_endl)
def _on_strip_arg_endl(self, data):
self.read_arg()
class STPRequest(object):
def __init__(self, connection):
self._argv = []
self.connection = connection
def add_arg(self, arg):
self._argv.append(arg)
@property
def argv(self):
return self._argv | UTF-8 | Python | false | false | 2,062 | py | 114,545 | stpserver.py | 111,506 | 0.611057 | 0.583414 | 0 | 69 | 28.898551 | 83 |
bdhafin/Python | 2,327,872,283,173 | 249463d4f263c24a34c781f86e8db8b8d98ea9ea | 02f99561e4e316c24e51f5feafe69f06dbe528be | /#23Membuat Package/sains/__init__.py | efa42f3c118a7eab7c84046999476a98a6b78f06 | []
| no_license | https://github.com/bdhafin/Python | 789eda948847e03779a3f204ec75e9b20c6901df | a6e265ec153322939481d0f2f6fd30f66b660a71 | refs/heads/master | 2020-09-13T18:27:34.398925 | 2019-11-22T11:39:25 | 2019-11-22T11:39:25 | 222,867,989 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Ini file yang akan di eksekusi ketika panggil sains
#mengambil dari module lain
#artinya . (titik) adalah di folder sekarang
from .matematika import *
#mengambil module fisika
from .fisika import * | UTF-8 | Python | false | false | 201 | py | 42 | __init__.py | 41 | 0.781095 | 0.781095 | 0 | 8 | 24.25 | 52 |
anshu1106/us_catalog | 15,333,033,251,514 | d2f401a73b1148cdfd573dd6f134b3ddbf9ae865 | 89106b77809d933fb0c1a96a5cdd35446fa6650b | /catalog/admin.py | d28fac027212066b73f45c79dfb2db5c8b5dd2c4 | []
| no_license | https://github.com/anshu1106/us_catalog | 3635d5c25202747e2cefde0a19fd5d0ca0d5ea30 | 050b954db9694adbc9fb9e79b71073c0094a7df2 | refs/heads/master | 2021-01-19T17:18:28.174374 | 2017-02-19T08:05:43 | 2017-02-19T08:05:43 | 82,444,332 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import *
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin import FieldListFilter
from django.utils.safestring import mark_safe
import csv
from django.http import HttpResponse
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
# Register your models here.
def makeRangeFieldListFilter(lookups, nullable=False):
class RangeFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.field_generic = '%s__' % field_path
self.range_params = dict([(k, v) for k, v in params.items()
if k.startswith(self.field_generic)])
self.lookup_kwarg_start = '%s__gte' % field_path
self.lookup_kwarg_stop = '%s__lt' % field_path
self.lookup_kwarg_null = '%s__isnull' % field_path
self.links = [ (_('Any value'), {}), ]
for name, start, stop in lookups:
query_params = {}
if start is not None:
query_params[self.lookup_kwarg_start] = str(start)
if stop is not None:
query_params[self.lookup_kwarg_stop] = str(stop)
self.links.append((name, query_params))
if nullable:
self.links.append((_('Unknown'), {
self.lookup_kwarg_null: 'True'
}))
super(RangeFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [
self.lookup_kwarg_start,
self.lookup_kwarg_stop,
self.lookup_kwarg_null
]
def choices(self, cl):
for title, param_dict in self.links:
yield {
'selected': self.range_params == param_dict,
'query_string': cl.get_query_string(
param_dict, [self.field_generic]),
'display': title,
}
return RangeFieldListFilter
def export_as_csv_action(description="Export selected objects as CSV file",
fields=None, exclude=None, header=True):
def export_as_csv(modeladmin, request, queryset):
"""
Generic csv export admin action.
based on http://djangosnippets.org/snippets/1697/
"""
opts = modeladmin.model._meta
field_names = set([field.name for field in opts.fields])
if fields:
fieldset = set(fields)
field_names = field_names & fieldset
elif exclude:
excludeset = set(exclude)
field_names = field_names - excludeset
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')
writer = csv.writer(response)
if header:
writer.writerow(list(field_names))
for obj in queryset:
writer.writerow([unicode(getattr(obj, field)).encode("utf-8","replace") for field in field_names])
return response
export_as_csv.short_description = description
return export_as_csv
export_as_csv.short_description = description
return export_as_csv
class CatalogAdmin(admin.ModelAdmin):
list_display = (
'brand', 'category', 'item_name', 'max_retail_price','client_price','photo','image_tag')
readonly_fields = ('image_tag',)
actions = [export_as_csv_action("CSV Export", fields=['brand', 'category', 'item_name', 'max_retail_price','client_price','photo','image_tag',])]
#fields = ( 'image_tag', )
#readonly_fields = ('image_tag',)
list_filter = ('brand','category',
('max_retail_price', makeRangeFieldListFilter([
(_('Less than 100'), None, 100),
(_('100 to 500'), 100, 500),
(_('500 to 1000'), 500, 1000),
(_('1000 to 7500'), 1000, 7500),
(_('7500 to 15000'), 7500, 15000),
(_('15000 to 30000'), 15000, 30000),
(_('At least 30000'), 30000, None),
], nullable=True)) )
# search_fields = ['user__username', 'phone']
admin.site.register(Brand)
admin.site.register(Category)
admin.site.register(Catalog, CatalogAdmin)
| UTF-8 | Python | false | false | 4,480 | py | 3 | admin.py | 3 | 0.56942 | 0.546875 | 0 | 120 | 36.291667 | 149 |
odolshinu/rfid_appengine | 5,600,637,382,750 | 2ace9dfc71808f06b86f8ecdabc57d84009d2c21 | 9245401f3d1e2143a2ec622bda29b968a0719509 | /appengine.py | 968dd23a92d8f80d03f4eeefc5109469d551d7ec | []
| no_license | https://github.com/odolshinu/rfid_appengine | 2ef5d15c34dc9554497ca6668fbf381c6012c5c0 | ef8851d9c515103faa5e35a5b6244d8b80d5e269 | refs/heads/master | 2021-01-25T05:35:39.224823 | 2010-09-23T14:47:36 | 2010-09-23T14:47:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class Store(db.Model):
place = db.StringProperty()
sequence = db.StringProperty()
# date = db.DateTimeProperty()
class MainPage(webapp.RequestHandler):
def post(self):
store = Store()
store.sequence = self.request.get('sequence')
store.place = self.request.get('place')
# store.date = self.request.get('time')
store.put()
class Display(webapp.RequestHandler):
def get(self):
store_query = Store.all()
store = store_query.fetch(10)
template_values = {
'store' : store
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path,template_values))
application = webapp.WSGIApplication(
[('/',MainPage),('/display', Display)],
debug = True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,065 | py | 4 | appengine.py | 2 | 0.659155 | 0.657277 | 0 | 40 | 25.55 | 68 |
TiRisik/for_project | 16,381,005,290,145 | f5588386c40485b9a599009974540e61166f4c17 | 83bbf1c81e69350220c62f66b9e6c3eeba488034 | /main.py | 78a7a1e77a6735df6b95154a36b3d1841f4cb4b9 | []
| no_license | https://github.com/TiRisik/for_project | d4335893c9c657d90c9c1c5e1ac535669aa5b038 | d05ee7e59101f2508a0698f5977178760a97b367 | refs/heads/master | 2023-04-30T08:51:32.560751 | 2021-05-18T04:25:39 | 2021-05-18T04:25:39 | 368,399,654 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request, render_template, redirect, url_for
from mongodb_class import User, Date, Person
from mongoengine import *
app = Flask(__name__)
app.config['SECRET_KEY'] = 'yandexlyceum_secret_key'
connect('db', host='mongodb+srv://Tiris:Et21121982@anibus.rzt5y.mongodb.net/db?retryWrites=true&w=majority')
result_user = None
@app.route('/login', methods=['GET', 'POST'])
def login():
global result_user
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
email = request.form['email']
password = request.form['password']
result_user = list(User.objects(log_user=email))
if result_user:
if password == result_user[0].pass_user:
return render_template('main.html')
else:
return render_template('login.html')
else:
return render_template('login.html')
@app.route("/main")
def main():
return render_template('main.html')
@app.route("/information", methods=['GET', 'POST'])
def informaon():
if request.method == 'GET':
return render_template('secret_two.html')
elif request.method == 'POST':
date1 = request.form['date']
result_date = list(Date.objects(date=date1))
return render_template('secret_two.html', tovar=result_date[0].data)
@app.route("/people", methods=['GET', 'POST'])
def information():
if request.method == 'GET':
result_person = list(Person.objects(search=1))
h = []
for i in result_person:
fio = i.name + ' ' + i.surname + ' ' + i.otchestvo
h.append([fio, i.age, i.dolgnost])
return render_template('secret_three.html', tovar=h)
elif request.method == 'POST':
if request.form['button'] == 'plus':
return redirect(url_for('secret'))
else:
return redirect(url_for('secret_four'))
@app.route("/secret", methods=['GET', 'POST'])
def secret():
if request.method == 'GET':
return render_template('secret.html')
elif request.method == 'POST':
f = request.files['file']
sfname = 'static/img/a.jpg'
f.save(sfname)
new_person = Person(name=request.form['name'], surname=request.form['name2'], otchestvo=request.form['name3'],\
age=request.form['age'], search=1, dolgnost=request.form['dolgnost'], photo='static/img/a.jpg').save()
return redirect(url_for('main'))
@app.route("/secret_four", methods=['GET', 'POST'])
def secret_four():
if request.method == 'GET':
return render_template('secret_four.html')
elif request.method == 'POST':
results = list(Person.objects(surname=request.form['name2']))
for i in results:
if i.name == request.form['name'] and i.otchestvo == request.form['name3']:
i.delete()
return redirect(url_for('main'))
if __name__ == '__main__':
app.run(port=8080, host='127.0.0.1')
| UTF-8 | Python | false | false | 3,012 | py | 9 | main.py | 2 | 0.599934 | 0.590305 | 0 | 86 | 34.023256 | 130 |
danielcaraway/annaextension | 5,866,925,349,426 | ea0dd241f633e19da2a0d13133af4025283ab594 | 9434143bb5cfff10d602d35baaba51ac347d98ee | /app.py | 0bd8d7cb3f8c5447f8e25288f1c12a4e01a8bbfd | []
| no_license | https://github.com/danielcaraway/annaextension | 221948747d0888c8deffa19bbff0063aeb77e0e7 | eb5181ac930f556379592c738aed18c00a6665bc | refs/heads/master | 2023-01-30T13:01:50.103016 | 2020-12-19T21:32:39 | 2020-12-19T21:32:39 | 322,460,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template, url_for, request
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
import json
app = Flask(__name__)
def get_print_link(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
searched_word = 'Print'
results = soup.body.find_all(string=re.compile(
'.*{0}.*'.format(searched_word)), recursive=True)
return results[0].parent['href']
def get_ingredients_from_link(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
# recipe_name = soup.find_all('h3', 'wprm-recipe-name')[0].text.strip()
recipe_name = 'test'
ingredients = soup.find_all('li', "wprm-recipe-ingredient")
all_ingredients = []
for i in ingredients:
try:
amount = i.find_all("span", "wprm-recipe-ingredient-amount")
amount = amount[0].text
except:
amount = 'no amount'
try:
unit = i.find_all("span", "wprm-recipe-ingredient-unit")
unit = unit[0].text
except:
unit = 'no unit'
try:
name = i.find_all("span", "wprm-recipe-ingredient-name")
name = name[0].text
except:
name = 'no name'
all_ingredients.append({'url': url,
'recipe_name': recipe_name,
'amount': amount,
'unit': unit,
'name': name})
return all_ingredients
def add_ingredients_to_dictionary(formatted_ingredient, shopping_list):
# print(formatted_ingredient)
ingredient = formatted_ingredient['name']
amount = formatted_ingredient['amount']
unit = formatted_ingredient['unit']
amount_unit = "{}({})".format(amount, unit)
if ingredient in shopping_list:
shopping_list[ingredient] = shopping_list[ingredient] + \
' + ' + amount_unit
else:
shopping_list[ingredient] = amount_unit
return shopping_list
def get_ingredients_flask(my_input):
print_links = []
for blog in my_input:
print_links.append(get_print_link(blog))
not_working = []
all_ingredients = []
for link in print_links:
ingredients = get_ingredients_from_link(link)
if len(ingredients) == 0:
not_working.append(link)
else:
all_ingredients.append(ingredients)
results_flattened = [
item for sublist in all_ingredients for item in sublist]
shopping_list = {}
my_list = [add_ingredients_to_dictionary(
x, shopping_list) for x in results_flattened]
# for site in not_working:
# print('So sorry, working on getting info from', site)
shopping_list_array = ["{}: {}".format(
k, v) for k, v in shopping_list.items()]
return shopping_list_array
def run_the_thing(input):
try:
# recipe_links = json.loads(input)
print(input)
recipe_links = input.split(',')
return get_ingredients_flask(recipe_links)
except:
return "Oh shoot! Something is broken! "
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
# get `content` from form
task_content = request.form['content']
gobs_program = run_the_thing(task_content)
return render_template('result.html', passed=gobs_program)
else:
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
| UTF-8 | Python | false | false | 3,536 | py | 4 | app.py | 1 | 0.587387 | 0.584842 | 0 | 122 | 27.983607 | 75 |
MapwayLabs/py3dtilers | 1,812,476,220,983 | ee19c75c834bda33ba1d208a2d680c6591c9e167 | 905f403132094fc53ec5fd771cf8300a3fb46bbd | /py3dtilers/GeojsonTiler/geojson.py | f0d39b7b7090d256309ea86201ba32953561236a | [
"ISC",
"Apache-2.0"
]
| permissive | https://github.com/MapwayLabs/py3dtilers | 8d604deecc16ab2526148c49c13b9b33d1676929 | 944795e6baaeeafff0d7050d77a96aab51582bbb | refs/heads/master | 2023-07-14T01:03:09.843668 | 2021-07-18T20:18:11 | 2021-07-18T20:18:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import os
from os import listdir
import sys
import numpy as np
import json
from scipy.spatial import ConvexHull
from shapely.geometry import Point, Polygon
# from rdp import rdp
from py3dtiles import TriangleSoup
from ..Common import ObjectToTile, ObjectsToTile
from .PolygonDetection import PolygonDetector
# The GeoJson file contains the ground surface of urban elements, mainly buildings.
# Those elements are called "features", each feature has its own ground coordinates.
# The goal here is to take those coordinates and create a box from it.
# To do this, we compute the center of the lower face
# Then we create the triangles of this face
# and duplicate it with a Z offset to create the upper face
# Then we create the side triangles to connect the upper and the lower faces
class Geojson(ObjectToTile):
    """One GeoJSON feature turned into an extruded 3D solid.

    The 2D footprint is triangulated around its centre, duplicated with a Z
    offset of `height` to form the upper face, and the two faces are joined
    by side triangles (see the comments above this class).
    """
    # Running count of parsed features; only used to label warning messages.
    n_feature = 0
    def __init__(self, id=None):
        super().__init__(id)
        self.geom = TriangleSoup()
        # Altitude of the lower face (feature z minus its extrusion height).
        self.z = 0
        # Extrusion height of the solid.
        self.height = 0
        # [x, y, z] centre of the extruded volume.
        self.center = []
        # Vertex positions (filled by parse_geom).
        self.vertices = list()
        # Vertex-index triangles (filled by parse_geom, used for OBJ export).
        self.triangles = list()
        # 2D footprint as a list of [x, y] pairs (filled by parse_geojson).
        self.coords = list()
    def get_center(self, coords):
        """Return [mean_x, mean_y, self.z] for the 2D points in `coords`."""
        x = 0
        y = 0
        for i in range(0, len(coords)):
            x += coords[i][0]
            y += coords[i][1]
        x /= len(coords)
        y /= len(coords)
        return [x, y, self.z]
    def create_triangles(self, vertices, coordsLenght):
        """Build the 4 * coordsLenght triangles of the extruded solid.

        `vertices` is expected to hold: bottom-centre vertex, bottom ring,
        top-centre vertex, top ring (in that order).  Returns a pair:
        triangle coordinate array and triangle vertex-index array.
        """
        # Contains the triangles vertices. Used to create 3D tiles
        triangles = np.ndarray(shape=(coordsLenght * 4, 3, 3))
        # Contains the triangles vertices index. Used to create Objs
        triangles_id = np.ndarray(shape=(coordsLenght * 4, 3))
        k = 0
        # Triangles in lower and upper faces
        for j in range(1, coordsLenght + 1):
            # Lower
            triangles[k] = [vertices[0], vertices[j], vertices[(j % coordsLenght) + 1]]
            triangles_id[k] = [0, j, (j % coordsLenght) + 1]
            # Upper (winding reversed so the face points away from the solid)
            triangles[k + 1] = [vertices[(coordsLenght + 1)], vertices[(coordsLenght + 1) + (j % coordsLenght) + 1], vertices[(coordsLenght + 1) + j]]
            triangles_id[k + 1] = [(coordsLenght + 1), (coordsLenght + 1) + (j % coordsLenght) + 1, (coordsLenght + 1) + j]
            k += 2
        # Triangles in side faces (two per edge of the footprint)
        for i in range(1, coordsLenght + 1):
            triangles[k] = [vertices[i], vertices[(coordsLenght + 1) + i], vertices[(coordsLenght + 1) + (i % coordsLenght) + 1]]
            triangles_id[k] = [i, (coordsLenght + 1) + i, (coordsLenght + 1) + (i % coordsLenght) + 1]
            triangles[k + 1] = [vertices[i], vertices[(coordsLenght + 1) + (i % coordsLenght) + 1], vertices[(i % coordsLenght) + 1]]
            triangles_id[k + 1] = [i, (coordsLenght + 1) + (i % coordsLenght) + 1, (i % coordsLenght) + 1]
            k += 2
        return [triangles, triangles_id]
    # Flatten list of lists (ex: [[a, b, c], [d, e, f], g]) to create a list (ex: [a, b, c, d, e, f, g])
    @staticmethod
    def flatten_list(list_of_lists):
        """Recursively flatten arbitrarily nested lists into one flat list."""
        if len(list_of_lists) == 0:
            return list_of_lists
        if isinstance(list_of_lists[0], list):
            return Geojson.flatten_list(list_of_lists[0]) + Geojson.flatten_list(list_of_lists[1:])
        return list_of_lists[:1] + Geojson.flatten_list(list_of_lists[1:])
    def parse_geojson(self, feature, properties):
        """Read z/height/precision properties and the footprint of `feature`.

        `properties` is a flat list of flag/value pairs; the value following
        'prec', 'height' and 'z' names the corresponding feature property.
        Returns False (and skips the feature) on any missing/invalid data.
        """
        # Current feature number
        Geojson.n_feature += 1
        # If precision is equal to 9999, it means Z values of the features are missing, so we skip the feature
        prec_name = properties[properties.index('prec') + 1]
        if prec_name != 'NONE':
            if prec_name in feature['properties']:
                if feature['properties'][prec_name] >= 9999.:
                    return False
            else:
                print("No propertie called " + prec_name + " in feature " + str(Geojson.n_feature))
                return False
        height_name = properties[properties.index('height') + 1]
        if height_name in feature['properties']:
            if feature['properties'][height_name] > 0:
                self.height = feature['properties'][height_name]
            else:
                # Non-positive heights cannot be extruded; skip.
                return False
        else:
            print("No propertie called " + height_name + " in feature " + str(Geojson.n_feature))
            return False
        z_name = properties[properties.index('z') + 1]
        if z_name in feature['properties']:
            # Stored z is the base of the solid: feature z minus its height.
            self.z = feature['properties'][z_name] - self.height
        else:
            print("No propertie called " + z_name + " in feature " + str(Geojson.n_feature))
            return False
        coordinates = feature['geometry']['coordinates']
        try:
            coords = Geojson.flatten_list(coordinates)
            # Group coords into (x,y) arrays, the z will always be the same z
            # The last point in features is always the same as the first, so we remove the last point
            coords = [coords[n:n + 2] for n in range(0, len(coords) - 3, 3)]
            self.coords = coords
            center = self.get_center(coords)
            self.center = [center[0], center[1], center[2] + self.height / 2]
        except RecursionError:
            # flatten_list is recursive; pathologically deep geometries bail out.
            return False
        return True
    def parse_geom(self):
        """Triangulate the footprint into the GLTF triangle-soup geometry.

        Returns True on success; fills self.geom, self.vertices and
        self.triangles as a side effect.
        """
        # Realize the geometry conversion from geojson to GLTF
        # GLTF expect the geometry to only be triangles that contains
        # the vertices position, i.e something in the form :
        # [
        #  [np.array([0., 0., 0,]),
        #   np.array([0.5, 0.5, 0.5]),
        #   np.array([1.0 ,1.0 ,1.0])]
        #  [np.array([0.5, 0.5, 0,5]),
        #   np.array([1., 1., 1.]),
        #   np.array([-1.0 ,-1.0 ,-1.0])]
        # ]
        coords = self.coords
        height = self.height
        # If the feature has at least 4 coords, create a convex hull
        # The convex hull reduces the number of points and the level of detail
        if len(coords) >= 4:
            # coords = rdp(coords)
            hull = ConvexHull(coords)
            # hull.vertices is counter-clockwise; reversed() makes it clockwise.
            coords = [coords[i] for i in reversed(hull.vertices)]
        coordsLenght = len(coords)
        vertices = np.ndarray(shape=(2 * (coordsLenght + 1), 3))
        # Set bottom center vertice value
        vertices[0] = self.get_center(coords)
        # Set top center vertice value
        vertices[coordsLenght + 1] = [vertices[0][0], vertices[0][1], vertices[0][2] + height]
        # For each coordinates, add a vertice at the coordinates and a vertice above at the same coordinates but with a Z-offset
        for i in range(0, coordsLenght):
            z = self.z
            vertices[i + 1] = [coords[i][0], coords[i][1], z]
            vertices[i + coordsLenght + 2] = [coords[i][0], coords[i][1], z + height]
        if(len(vertices) == 0):
            return False
        # triangles[0] contains the triangles with coordinates ([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]) used for 3DTiles
        # triangles[1] contains the triangles with indexes ([1, 2, 3]) used for Objs
        triangles = self.create_triangles(vertices, coordsLenght)
        self.geom.triangles.append(triangles[0])
        self.set_box()
        self.vertices = vertices
        self.triangles = triangles[1]
        return True
    def get_geojson_id(self):
        """Return the feature id stored on the base ObjectToTile."""
        return super().get_id()
    def set_geojson_id(self, id):
        """Set the feature id on the base ObjectToTile."""
        return super().set_id(id)
class Geojsons(ObjectsToTile):
    """
    A decorated list of ObjectsToTile type objects.
    """
    # Cube edge length (same unit as the coordinates) used by the 'cube'
    # grouping when no explicit size is given.
    defaultGroupOffset = 50
    # Maps each group index to the indexes of the base features it contains
    # (filled by group_features).
    features_dict = {}
    # The individually-parsed features kept alongside the grouped ones
    # (filled by retrieve_geojsons when grouping is active).
    base_features = list()
    def __init__(self, objs=None):
        super().__init__(objs)
    # Round the coordinate to the closest multiple of 'base'
    @staticmethod
    def round_coordinate(coordinate, base):
        """Snap each component of `coordinate` to the nearest multiple of `base`.

        NOTE(review): mutates `coordinate` in place and also returns it.
        """
        rounded_coord = coordinate
        for i in range(0, len(coordinate)):
            rounded_coord[i] = base * round(coordinate[i] / base)
        return rounded_coord
    @staticmethod
    def group_features_by_polygons(features, path):
        """Group `features` by the polygons found in `<path>/polygons/*.json`."""
        try:
            polygon_path = os.path.join(path, "polygons")
            polygon_dir = listdir(polygon_path)
        except FileNotFoundError:
            print("No directory called 'polygons' in", path, ". Please, place the polygons to read in", polygon_path)
            print("Exiting")
            sys.exit(1)
        polygons = list()
        for polygon_file in polygon_dir:
            if(".geojson" in polygon_file or ".json" in polygon_file):
                with open(os.path.join(polygon_path, polygon_file)) as f:
                    gjContent = json.load(f)
                for feature in gjContent['features']:
                    coords = feature['geometry']['coordinates']
                    coords = Geojson.flatten_list(coords)
                    # Polygons are 2D here: pair up the flat coordinate list.
                    coords = [coords[n:n + 2] for n in range(0, len(coords) - 2, 2)]
                    polygons.append(Polygon(coords))
        return Geojsons.distribute_features_in_polygons(features, polygons)
    @staticmethod
    def group_features_by_roads(features, path):
        """Group `features` by polygons delimited by roads in `<path>/roads/`."""
        try:
            road_path = os.path.join(path, "roads")
            road_dir = listdir(road_path)
        except FileNotFoundError:
            print("No directory called 'roads' in", path, ". Please, place the roads to read in", road_path)
            print("Exiting")
            sys.exit(1)
        lines = list()
        for road_file in road_dir:
            if(".geojson" in road_file or ".json" in road_file):
                with open(os.path.join(road_path, road_file)) as f:
                    gjContent = json.load(f)
                for feature in gjContent['features']:
                    if 'type' in feature['geometry'] and feature['geometry']['type'] == 'LineString':
                        lines.append(feature['geometry']['coordinates'])
        print("Roads parsed from file")
        # Turn the road network into closed polygons (city blocks).
        p = PolygonDetector(lines)
        polygons = p.create_polygons()
        return Geojsons.distribute_features_in_polygons(features, polygons)
    # Group features which are in the same cube of size 'size'
    @staticmethod
    def group_features_by_cube(features, size):
        """Group `features` whose centres fall into the same cube of edge `size`."""
        features_dict = {}
        # Create a dictionary key: cubes center (x,y,z); value: list of features index
        for i in range(0, len(features)):
            closest_cube = Geojsons.round_coordinate(features[i].center, size)
            if tuple(closest_cube) in features_dict:
                features_dict[tuple(closest_cube)].append(i)
            else:
                features_dict[tuple(closest_cube)] = [i]
        return Geojsons.group_features(features, features_dict)
    @staticmethod
    def group_features(features, dictionary):
        """Merge each index group of `dictionary` into one aggregate Geojson.

        The aggregate keeps the union of footprints, the minimum base z and
        the mean height of its members.  Also records the grouping in
        Geojsons.features_dict.
        """
        k = 0
        grouped_features = list()
        grouped_features_dict = {}
        for key in dictionary:
            geojson = Geojson("group" + str(k))
            # 9999 acts as "+infinity" so the first member's z always wins.
            z = 9999
            height = 0
            coords = list()
            grouped_features_dict[k] = []
            for j in dictionary[key]:
                grouped_features_dict[k].append(j)
                height += features[j].height
                if z > features[j].z:
                    z = features[j].z
                for coord in features[j].coords:
                    coords.append(coord)
            geojson.coords = coords
            geojson.z = z
            geojson.height = height / len(dictionary[key])
            center = geojson.get_center(coords)
            geojson.center = [center[0], center[1], center[2] + geojson.height / 2]
            grouped_features.append(geojson)
            k += 1
        Geojsons.features_dict = grouped_features_dict
        return grouped_features
    @staticmethod
    def distribute_features_in_polygons(features, polygons):
        """Assign each feature to the first polygon containing its centre.

        Features whose centre falls in no polygon are collected but
        NOTE(review): `features_without_poly` is never used afterwards, so
        those features are silently dropped from the result — confirm intended.
        """
        features_dict = {}
        features_without_poly = list()
        for i in range(0, len(features)):
            p = Point(features[i].center)
            in_polygon = False
            for index, polygon in enumerate(polygons):
                if p.within(polygon):
                    if index in features_dict:
                        features_dict[index].append(i)
                    else:
                        features_dict[index] = [i]
                    in_polygon = True
                    break
            if not in_polygon:
                features_without_poly.append(features[i])
        grouped_features = Geojsons.group_features(features, features_dict)
        return grouped_features
    @staticmethod
    def retrieve_geojsons(path, group, properties, obj_name):
        """
        :param path: a path to a directory
        :param group: grouping strategy flags ('road', 'polygon' or
            'cube' optionally followed by a size); anything else disables grouping
        :param properties: flat flag/value list forwarded to Geojson.parse_geojson
        :param obj_name: when non-empty, an OBJ file of that name is written
            with all grouped geometry, recentred on the mean feature centre

        :return: a list of geojson.
        """
        geojson_dir = listdir(path)
        Geojsons.features_dict = {}
        Geojsons.base_features = list()
        vertices = list()
        triangles = list()
        features = list()
        # OBJ indices are 1-based.
        vertice_offset = 1
        center = [0, 0, 0]
        objects = list()
        for geojson_file in geojson_dir:
            if(os.path.isfile(os.path.join(path, geojson_file))):
                if(".geojson" in geojson_file or ".json" in geojson_file):
                    # Get id from its name
                    # NOTE(review): str.replace removes every 'json' substring,
                    # not just the extension — confirm file names never contain
                    # 'json' elsewhere.
                    id = geojson_file.replace('json', '')
                    with open(os.path.join(path, geojson_file)) as f:
                        gjContent = json.load(f)
                    k = 0
                    for feature in gjContent['features']:
                        if "ID" in feature['properties']:
                            feature_id = feature['properties']['ID']
                        else:
                            feature_id = id + str(k)
                            k += 1
                        geojson = Geojson(feature_id)
                        if(geojson.parse_geojson(feature, properties)):
                            features.append(geojson)
        grouped = True
        if 'road' in group:
            grouped_features = Geojsons.group_features_by_roads(features, path)
        elif 'polygon' in group:
            grouped_features = Geojsons.group_features_by_polygons(features, path)
        elif 'cube' in group:
            try:
                size = int(group[group.index('cube') + 1])
            except IndexError:
                size = Geojsons.defaultGroupOffset
            grouped_features = Geojsons.group_features_by_cube(features, size)
        else:
            grouped_features = features
            grouped = False
        if grouped:
            # Keep the ungrouped geometry as well so callers can still access it.
            for geojson in features:
                if(geojson.parse_geom()):
                    Geojsons.base_features.append(geojson)
        for feature in grouped_features:
            # Create geometry as expected from GLTF from an geojson file
            if(feature.parse_geom()):
                objects.append(feature)
                if not obj_name == '':
                    # Add triangles and vertices to create an obj
                    for vertice in feature.vertices:
                        vertices.append(vertice)
                    for triangle in feature.triangles:
                        triangles.append(triangle + vertice_offset)
                    vertice_offset += len(feature.vertices)
                    for i in range(0, len(feature.center)):
                        center[i] += feature.center[i]
        if not obj_name == '':
            # Recentre the OBJ on the average feature centre before writing.
            center[:] = [c / len(objects) for c in center]
            file_name = obj_name
            f = open(os.path.join(file_name), "w")
            f.write("# " + file_name + "\n")
            for vertice in vertices:
                f.write("v " + str(vertice[0] - center[0]) + " " + str(vertice[1] - center[1]) + " " + str(vertice[2] - center[2]) + "\n")
            for triangle in triangles:
                f.write("f " + str(int(triangle[0])) + " " + str(int(triangle[1])) + " " + str(int(triangle[2])) + "\n")
        return Geojsons(objects)
| UTF-8 | Python | false | false | 15,946 | py | 34 | geojson.py | 27 | 0.552113 | 0.540512 | 0 | 418 | 37.148325 | 150 |
ray-project/maze-raylit | 7,430,293,422,790 | 6536efab1504d92413ccffefb90f192f218cfa30 | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /rllib/examples/env/repeat_after_me_env.py | 445320b52dcb58f3d35d1fca4c41345ca14bea88 | [
"Apache-2.0",
"MIT"
]
| permissive | https://github.com/ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | false | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | 2020-12-04T19:08:05 | 2020-12-04T22:34:15 | 100,068 | 1 | 0 | 0 | Python | false | false | import gym
from gym.spaces import Discrete
import random
class RepeatAfterMeEnv(gym.Env):
    """Env in which the observation at timestep minus n must be repeated."""

    def __init__(self, config):
        # Observations and actions are both single bits.
        self.observation_space = Discrete(2)
        self.action_space = Discrete(2)
        # How many steps back the agent must look to find the token to repeat.
        self.delay = config.get("repeat_delay", 1)
        assert self.delay >= 1, "`repeat_delay` must be at least 1!"
        self.history = []

    def reset(self):
        # Pad the history with zeros so the first `delay` targets are defined.
        self.history = [0 for _ in range(self.delay)]
        return self._next_obs()

    def step(self, action):
        # The correct action is the observation emitted `delay` steps ago.
        target = self.history[-(1 + self.delay)]
        reward = 1 if action == target else -1
        episode_over = len(self.history) > 100
        return self._next_obs(), reward, episode_over, {}

    def _next_obs(self):
        # Draw a fresh random bit, record it, and emit it as the observation.
        token = random.choice([0, 1])
        self.history.append(token)
        return token
| UTF-8 | Python | true | false | 880 | py | 1,089 | repeat_after_me_env.py | 795 | 0.578409 | 0.5625 | 0 | 31 | 27.387097 | 76 |
RWTH-EBC/pyDMPC | 16,793,322,152,189 | dfb6b7810d20496e18ea0b05234b97d01d09f6e7 | 26aadffe833b6f4e44d30563c96981887d5aedd9 | /pyDMPC/SpecialStudies/entropy-agents.py | cb81e9d0f19d320cf1b89601db511a43b65e5177 | [
"LGPL-3.0-only",
"BSD-3-Clause",
"GPL-2.0-only",
"BSD-2-Clause",
"GPL-1.0-or-later",
"GPL-3.0-only",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/RWTH-EBC/pyDMPC | 4a932a49deaad341d94f0c486d8075b4be1522de | 90f0753b708d9e8dfdd78256af9adf58f0887f7a | refs/heads/master | 2021-06-30T11:47:14.871487 | 2020-08-09T11:59:43 | 2020-08-09T11:59:43 | 135,815,241 | 19 | 6 | MIT | false | 2019-11-13T22:33:11 | 2018-06-02T12:19:11 | 2019-11-13T21:15:09 | 2019-11-13T22:33:10 | 5,103 | 2 | 1 | 23 | Python | false | false | from joblib import load
import numpy as np
# Weight factors for the three cost terms combined in agent_2:
# a_1 scales the downstream-subsystem cost, a_2 the entropy-generation
# term, and a_3 the KL-divergence term.
a_1 = 1
a_2 = 1
a_3 = 1
# Working directory where the trained joblib models/classifiers are stored.
wkdir = r"C:\TEMP\Dymola"
def agent_1():
    """Load the downstream agent's trained artifacts from `wkdir`.

    Returns a 4-tuple: (trajectory classifier, prior probability of each
    trajectory class, per-class cost tables, per-class average tables).
    """
    trajectory_classifier = load(wkdir + r"\heater-clf.joblib")
    class_cost_tables = load(wkdir + r"\cost-clf.joblib")
    class_average_tables = load(wkdir + r"\av-clf.joblib")
    # Prior probability assigned to each trajectory class (keys 1..4).
    class_priors = {1: 0.1, 2: 0.3, 3: 0.1, 4: 0.5}
    return trajectory_classifier, class_priors, class_cost_tables, class_average_tables
def agent_2(input, probs):
    """Pick the cheapest control sequence for each inlet condition.

    For every inlet air temperature in `input`, each candidate valve-command
    sequence in `seq` is simulated with the learned MLP heater model; the
    total cost combines the downstream-subsystem cost (a_1), the entropy
    generation in the heat exchanger (a_2) and the KL divergence between the
    local prior and the downstream class prior (a_3).

    :param input: list of inlet temperatures to evaluate
    :param probs: prior probability associated with each entry of `input`
    :return: list with the minimal total cost found for each input entry
    """
    # Standard sequences used for this case study
    seq = [[0,0],[100,100],[45,35],[60,50],[30,20],[50,50],[20,20]]
    min_cost = []
    # Load the subsystem model and the corresponding scaler
    MLPModel = load(wkdir + r"\heater.joblib")
    scaler = load(wkdir + r"\heater_scaler.joblib")
    # Get all the information from the other agent
    clf, prob_dic, cost_clf, av_clf = agent_1()
    for l,w in enumerate(input):
        cost = []
        kl = []
        for k,u in enumerate(seq):
            count_c = 0
            command = []
            Temp = []
            x_train =[]
            v = []
            # Actuator ramp time constant (steps) used to smooth command changes.
            tim_con = 10
            for t in range(70):
                # Command profile: idle for 10 steps, u[0] until step 40, then u[1].
                if t < 10:
                    com = 0
                elif t < 40:
                    com = u[0]
                else:
                    com = u[1]
                Temp.append(w)
                if t > 0:
                    if abs(com-command[-1])>0.001:
                        if count_c < tim_con:
                            # Ramp towards the new set point.
                            command.append(command[-1] + (com-command[-1])/(tim_con-count_c))
                            # NOTE(review): `+= 0` leaves the ramp counter frozen at 0,
                            # so the denominator never shrinks — looks like a typo for
                            # `+= 1`; confirm the intended first-order ramp behaviour.
                            count_c += 0
                        else:
                            command.append(com)
                            count_c = 1
                    else:
                        command.append(com)
                else:
                    command.append(com)
            # Model input: (command, inlet temperature) pairs, scaled as in training.
            x_train = np.stack((command, Temp), axis=1)
            x_train = np.array(x_train)
            scaled_instances = scaler.transform(x_train)
            temp = MLPModel.predict(scaled_instances)
            entr = 0
            for t in range(10,70,1):
                # Temperature increments feed the trajectory classifier below.
                dif = temp[t] - temp[t-1]
                v.append(dif)
                # Water outlet temperature from an energy balance
                # (0.5 kg/s air * 1000 J/kgK vs 0.25 kg/s water * 4180 J/kgK).
                Twout = 273+ 50 - 0.5*1000*(temp[t]-w)/0.25/4180
                entr += abs(0.5*1000*np.log((273 + w)/(273 + temp[t])) +
                0.25*4180*np.log((273 + 50)/(Twout)))
            # Average entropy generation over the 60 evaluated steps.
            entr = entr/60
            # Steady-state outlet temperature: mean of the last 10 steps.
            av = 0
            for t in range(60,70,1):
                av += temp[t]
            av = av/10
            values = [val for k,val in enumerate(v)]
            x_test = [values]
            # Find the correct class
            cl = clf.predict(x_test)
            x_test = [[av, cl[0]]]
            # Cost of the downstream subsystem
            c1 = np.interp(av,av_clf[cl[0]-1],cost_clf[cl[0]-1])
            # KL-divergence
            kl.append(a_3*probs[l]*np.log(probs[l]/prob_dic[float(cl)]))
            # Cost in the subsystem itself
            cost.append(a_1*c1 + a_2*entr +
            a_3*probs[l]*np.log(probs[l]/prob_dic[float(cl)]))
        # Find the minimal cost
        min_cost.append(min(cost))
        pos = np.argmin(cost)
        print(kl[pos])
        print(min(cost))
        print(min(cost)-kl[pos])
    return min_cost
def main():
    """Entry point: evaluate agent 2 for one inlet temperature (30) with prior 1."""
    agent_2([30], [1])


if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 3,408 | py | 86 | entropy-agents.py | 14 | 0.453345 | 0.410505 | 0 | 130 | 25.215385 | 93 |
shivankurkapoor/moleculardating | 1,194,000,912,116 | 1217e0834afa53822d7ebdb9667af45b909ad6be | 4eb61b533ec7c65134d6898dfa123b859e8b1f5f | /scripts/NGS/hddistribution.py | 6dcc98c4fc56f61229b3f361cfb03782076e1e7f | [
"BSD-3-Clause"
]
| permissive | https://github.com/shivankurkapoor/moleculardating | b1090c648804f489c965267ccd5557443dcce92e | 4a72c3e92a09ab321e0d92840cc7619857bbab8a | refs/heads/master | 2021-01-18T21:02:30.432791 | 2018-05-17T05:45:55 | 2018-05-17T05:45:55 | 87,005,214 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Author : Shivankur Kapoor
Contact : kapoors@usc.edu
Description: Generates hamming distance distribution plots for clustered and unclustered fasta files
'''
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MaxNLocator
# Destination for plots of single (web) requests; batch requests save into the
# caller-provided output directory instead.  Earlier deployment paths kept for reference.
# image_path = '/home/leelab/PycharmProjects/moleculardating/application/static/images'
# image_path = '/home/spark/moleculardating/application/static/images'
image_path = '/home/spark/moleculardating/application/static/images'
def distribution_plot(subject, time, hd, freq, output, type, request_type, request_id):
    '''
    Generates hd distribution plot
    :param subject: name of the subject (currently unused inside this function)
    :param time: time point (currently unused inside this function)
    :param hd: sorted hamming-distance values (x axis)
    :param freq: list of frequency for each hd (bar heights)
    :param output: output directory used for batch requests
    :param type: type of run ('CLUSTERED' or 'UNCLUSTERED'), used in the file name
    :param request_type: 'SINGLE' saves into the web app image folder,
        anything else saves into `output`
    :param request_id: request identifier used as the file-name prefix
    :return: none
    '''
    # plt.figure(figsize=(5, 5))
    ax = plt.figure().gca()
    # Hamming distances and counts are integers: force integer axis ticks.
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    # ax.(axis='both', which='major', pad=15)
    hist = plt.bar(hd, freq, color='grey')
    ymin, ymax = ax.get_ylim()
    # Add one unit of headroom when the tallest bar touches the axis top.
    if ymax == max(freq):
        plt.ylim((0, max(freq) + 1))
    plt.xlabel('Hamming Distance', fontsize=14)
    plt.ylabel('Distribution', fontsize=14, labelpad=15)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    if request_type == 'SINGLE':
        plt.savefig(os.path.join(image_path, request_id + '_' + type + '.png'))
    else:
        plt.savefig(os.path.join(output, request_id + '_' + type + '.png'))
    plt.close()
def hd_distribution(CLUSTERED_HD_DATA, UNCLUSTERED_HD_DATA, SUBJECT_DATA, OUTPUT, REQUEST_TYPE, REQUEST_ID):
    '''
    Plot the hamming-distance distribution for every subject/time in the run.

    :param CLUSTERED_HD_DATA: path of hamming distance npy file for clustered fasta files
    :param UNCLUSTERED_HD_DATA: path of hamming distance npy file for unclustered fasta files
    :param SUBJECT_DATA: path of final csv file
    :param OUTPUT: path of output dir
    :param REQUEST_TYPE: forwarded to distribution_plot ('SINGLE' or batch)
    :param REQUEST_ID: forwarded to distribution_plot as file-name prefix
    :return:
    '''
    clustered_by_subject = dict(np.load(CLUSTERED_HD_DATA).item())
    unclustered_by_subject = dict(np.load(UNCLUSTERED_HD_DATA).item())

    def plot_counts(hd_counts, subject, time, plot_type):
        # Sort the distances and plot their frequencies.
        distances = sorted(hd_counts.keys())
        counts = [hd_counts[d] for d in distances]
        distribution_plot(subject, time, distances, counts, OUTPUT, plot_type, REQUEST_TYPE, REQUEST_ID)

    df = pd.read_csv(SUBJECT_DATA, index_col=False, dtype={"#SUBJECT": "string"})
    for _, row in df.iterrows():
        subject = str(row['#SUBJECT'])
        clustered_flag = str(row['CLUSTERED'])
        time = float(row['TIME'])
        # Clustered subjects get both plots; unclustered ones only the latter.
        if clustered_flag == 'YES':
            plot_counts(clustered_by_subject[subject][time], subject, time, 'CLUSTERED')
        if clustered_flag in ('YES', 'NO'):
            plot_counts(unclustered_by_subject[subject][time], subject, time, 'UNCLUSTERED')
| UTF-8 | Python | false | false | 3,328 | py | 80 | hddistribution.py | 59 | 0.63762 | 0.632813 | 0 | 84 | 37.619048 | 108 |
dchristle/measurement | 17,403,207,494,240 | 0e4af01f851ac97a2537a918af7eb18226f6e262 | 2d02dc538862e822d90c8a5df2d2375b4cd5f166 | /scripts/monitor_environment.py | 12e9a774742cd39dc68e1b6572330eb41c445c62 | []
| no_license | https://github.com/dchristle/measurement | 1f6b7bbd0f58e5e9828bb067a66ab00230284f4f | 4cec9f0e2727c670be96cc117a85880e8e774e4e | refs/heads/master | 2020-12-14T06:02:25.540754 | 2016-05-31T22:40:59 | 2016-05-31T22:40:59 | 16,591,782 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Monitor temperature and humidity over optical table
import numpy
from time import time,sleep
import os
import qt
import logging
import msvcrt
import datetime
def simple(t_maximum, meas_period):
    """Log optical-table temperature and humidity until `t_maximum` hours pass.

    :param t_maximum: maximum monitoring duration, in HOURS
    :param meas_period: sampling period, in seconds
    Press 'q' in the (Windows) console to stop early.
    """
    # This example will monitor the T/H for a period of up to
    # t_maximum, which is measured in HOURS.
    thmon = qt.instruments['thmon']
    # meas_period is the measurement period denoted in seconds.
    qt.mstart()
    data = qt.Data(name='environment_monitor')
    data.add_coordinate('Time (minutes)')
    data.add_value('Temperature')
    data.add_value('Humidity')
    data.create_file()
    # Strip the 4-character extension so the plot PNG sits next to the data file.
    filename=data.get_filepath()[:-4]
    plot2d_1 = qt.Plot2D(data, name='env_temperature', valdim=1)
    plot2d_2 = qt.Plot2D(data, name='env_humidity', valdim=2)
    starttime = datetime.datetime.now()
    timedelta = 0
    while timedelta < t_maximum*3600:
        # Poll the Windows console for a 'q' key press to abort.
        if (msvcrt.kbhit() and (msvcrt.getch() == 'q')): break
        # Celsius -> Fahrenheit.  NOTE(review): under Python 2, `x*9/5` performs
        # integer division when the probe returns an int — confirm the driver
        # returns a float.
        currentT = thmon.get_probe1_temperature()*9/5+32
        currentH = thmon.get_humidity()
        currenttime = datetime.datetime.now()
        c = currenttime - starttime
        # Elapsed seconds since start (microseconds ignored).
        timedelta = (c.days * 86400 + c.seconds)
        qt.msleep(meas_period)
        data.add_data_point(timedelta/60.0,currentT,currentH)
        plot2d_1.set_plottitle('Environment temperature monitoring, SiC SD Lab, ' + starttime.strftime("%A, %d. %B %Y %I:%M%p"))
        plot2d_2.set_plottitle('Environment humidity monitoring, SiC SD Lab, ' + starttime.strftime("%A, %d. %B %Y %I:%M%p"))
        # The temperature plot is re-saved on every sample (overwrites the file).
        plot2d_1.save_png(filename+'.png')
    data.close_file()
    qt.mend()
| UTF-8 | Python | false | false | 1,587 | py | 65 | monitor_environment.py | 65 | 0.657845 | 0.637051 | 0 | 62 | 24.596774 | 124 |
leparrav/LEARN-EXAMPLES | 14,637,248,553,791 | 54cb718d4fe640179d4eee873130171222d53740 | 8a8bffead4f7790a22a7cf74c7f7c60c0bd17d5b | /Django/PSite_project/portafolio/urls.py | 6f646c3cc792ac6ed58d5cb9af97245a1cf44054 | [
"MIT"
]
| permissive | https://github.com/leparrav/LEARN-EXAMPLES | 1ed7617851040238c2faf89202427e543c774b86 | f3900ec0c60e60378d5e982aaae95517adaf8541 | refs/heads/master | 2021-01-02T08:57:24.188141 | 2020-06-22T09:24:18 | 2020-06-22T09:24:18 | 21,235,808 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, url
from portafolio import views
# URL routes for the portafolio app: index, course list and job list.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — confirm the project's Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$',views.index,name='index'),
    url(r'^courses/$',views.courses,name='courses'),
    url(r'^jobs/$',views.jobs,name='jobs'),
)
antsfamily/pyopt | 11,132,555,257,173 | 3cb97151e18036b391420cb59616a1d501d5f8bb | 3367a04a7652d2f65db232b5e90b82313a2b0603 | /pyopt/optimizer/salsa.py | a5b741f6a2f72c03ccd2d7e2143405cbe29f4119 | [
"MIT"
]
| permissive | https://github.com/antsfamily/pyopt | ff0f488d8b9db507071f3fba4001288780ba1a40 | e1d240321f954219daa44c5c7f73f6ad3f0e6427 | refs/heads/master | 2020-05-26T21:23:18.851051 | 2019-06-13T03:49:39 | 2019-06-13T03:49:39 | 188,378,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-07-06 10:38:13
# @Author : Yan Liu & Zhi Liu (zhiliu.mind@gmail.com)
# @Link : http://iridescent.ink
# @Version : $1.0$
#
# @Note : http://cascais.lx.it.pt/~mafonso/salsa.html
#
from __future__ import division, print_function, absolute_import
import numpy as np
class Salsa(object):
    r"""State holder for the Split Augmented Lagrangian Shrinkage Algorithm.

    Stores the caller-supplied problem argument together with the iteration
    counter (`k`), convergence tolerance (`tol`) and iteration cap
    (`maxiter`) used by the solver.
    """
    def __init__(self, arg):
        super(Salsa, self).__init__()
        # Problem definition / options handed in by the caller.
        self.arg = arg
        # Iteration cap and convergence tolerance defaults.
        self.maxiter = 100
        self.tol = 1.0e-10
        # Current iteration counter.
        self.k = 0
| UTF-8 | Python | false | false | 614 | py | 13 | salsa.py | 12 | 0.571661 | 0.530945 | 0 | 27 | 21 | 64 |
Poriseler/messages | 18,339,510,356,974 | 3d323dd0808664f81c864df35356304f44fadc39 | 110227177f324c5e6e56026a63f152ec9e8fcf52 | /communication/migrations/0003_auto_20210522_2202.py | f4603a51aeba4714acb8cf97470dbaf5421c7787 | []
| no_license | https://github.com/Poriseler/messages | ef6fdeaa32add032d75bb15560dee1e945cff3ee | 70204c42f3fa2bf542950fc751d663071dae6b70 | refs/heads/main | 2023-04-26T09:18:58.754203 | 2021-05-23T10:27:15 | 2021-05-23T10:27:15 | 370,016,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.3 on 2021-05-22 20:02
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: make Message.created default to now() and make
    Message.last_update an auto-updated, nullable timestamp."""
    dependencies = [
        ('communication', '0002_rename_messege_message'),
    ]
    operations = [
        migrations.AlterField(
            model_name='message',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='message',
            name='last_update',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
    ]
| UTF-8 | Python | false | false | 649 | py | 3 | 0003_auto_20210522_2202.py | 3 | 0.582435 | 0.553159 | 0 | 24 | 25.041667 | 74 |
Silberschleier/pgm-exercises | 8,375,186,243,351 | 8c11dab25cf22ba3412659be6a3e232b814929b5 | 0e7036ed329cf69ffc735fc594f870152d781a32 | /sheet09/sheet09.py | a15db42da3ddb7e937f8b14d486183be3234ff2a | []
| no_license | https://github.com/Silberschleier/pgm-exercises | 1b599ae1eae2ae72207c53d17466fb85270fe53f | f57a8ba6ca20ec7522a9d26ee762550f2527e2c6 | refs/heads/master | 2021-09-04T22:20:23.589071 | 2018-01-22T17:07:35 | 2018-01-22T17:07:35 | 109,287,682 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 13:55:52 2018
@author: Maren
"""
import numpy as np
from itertools import product
# Training data: 15 observed printer-diagnosis cases.  The first five rows are
# the (binary) fault variables, the last five the (binary) observed symptoms;
# column i across all arrays is one complete observation.
table = { 'fuse': np.array((0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1)),
          'drum': np.array((0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0)),
          'toner': np.array((1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0)),
          'paper': np.array((1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0)),
          'roller': np.array((0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1)),
          'burning': np.array((0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0)),
          'quality': np.array((1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0)),
          'wrinkled': np.array((0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1)),
          'mult_pages': np.array((0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1)),
          'paper_jam': np.array((0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0)),
          }
def learn_prob(var):
    """Maximum-likelihood estimate of P(var = 1) from a sample of binary values."""
    positives = sum(1 for value in var if value == 1)
    return float(positives) / len(var)
def learn_prob_with_one_parent(var, par):
    """ML-estimate the CPT P(var = 1 | par) from paired binary samples.

    Returns a list with one entry per parent value (0 then 1): either the
    tuple (rounded probability, parent value), or the bare float 0.0 when no
    sample matches that parent value.
    """
    cpt = []
    for parent_value in (0, 1):
        matching = [v for v, p in zip(var, par) if p == parent_value]
        if not matching:
            cpt.append(0.0)
        else:
            hits = sum(1 for v in matching if v == 1)
            cpt.append((np.round(float(hits) / len(matching), decimals=4), parent_value))
    return cpt
def learn_prob_with_two_parents(var, par1, par2):
    """ML-estimate the CPT P(var = 1 | par1, par2) from binary samples.

    Returns one entry per (par1, par2) combination in itertools.product
    order: the tuple (rounded probability, par1 value, par2 value), or the
    bare float 0.0 when no sample matches that combination.
    """
    cpt = []
    for v1, v2 in product((0, 1), (0, 1)):
        matching = [v for v, p1, p2 in zip(var, par1, par2) if p1 == v1 and p2 == v2]
        if not matching:
            cpt.append(0.0)
        else:
            hits = sum(1 for v in matching if v == 1)
            cpt.append((np.round(float(hits) / len(matching), decimals=4), v1, v2))
    return cpt
def learn_prob_with_three_parents(var, par1, par2, par3):
    """ML-estimate the CPT P(var = 1 | par1, par2, par3) from binary samples.

    Returns one entry per (par1, par2, par3) combination in
    itertools.product order: the tuple (rounded probability, par1, par2,
    par3), or the bare float 0.0 when no sample matches that combination.
    """
    cpt = []
    for v1, v2, v3 in product((0, 1), (0, 1), (0, 1)):
        matching = [v for v, p1, p2, p3 in zip(var, par1, par2, par3)
                    if p1 == v1 and p2 == v2 and p3 == v3]
        if not matching:
            cpt.append(0.0)
        else:
            hits = sum(1 for v in matching if v == 1)
            cpt.append((np.round(float(hits) / len(matching), decimals=4), v1, v2, v3))
    return cpt
def backward_probability_one_parent(ev, par, prior, probs):
    """Posterior P(parent = 1 | child = ev) via Bayes' rule.

    `probs` is a one-parent CPT as produced by learn_prob_with_one_parent:
    [(P(child=1|parent=0), 0), (P(child=1|parent=1), 1)].  `par` is unused
    (kept for interface symmetry with the two-parent variant).
    """
    for entry in probs:
        if entry[1] == ev:
            likelihood = entry[0]
    # Marginal P(child = ev) by summing over the parent values.
    evidence = (1 - prior) * probs[0][0] + prior * probs[1][0]
    posterior = likelihood * prior / evidence
    return posterior
# The first parent variable is the one we want the posterior for
def backward_probability_two_parents(ev, par1, par2, prior1, prior2, probs):
    """Posterior P(par1 = 1 | child = ev), marginalising out par2.

    `probs` is a two-parent CPT in itertools.product order:
    [(P(c=1|0,0), 0, 0), (P(c=1|0,1), 0, 1), (P(c=1|1,0), 1, 0), (P(c=1|1,1), 1, 1)].
    `par1`/`par2` are unused (kept for interface symmetry).
    """
    likelihood = 0
    for entry in probs:
        if entry[1] != ev:
            continue
        # Weight each matching CPT row by the prior of its par2 value.
        weight = (1 - prior2) if entry[2] == 0 else prior2
        likelihood += entry[0] * weight
    # Marginal P(child = ev) summed over all four parent combinations.
    evidence = (1-prior1)*(1-prior2)*probs[0][0] + (1-prior1)*prior2*probs[1][0] + prior1*(1-prior2)*probs[2][0] + prior1*prior2*probs[3][0]
    return likelihood * prior1 / evidence
def prob_fuse_given_evidence():
    """Posterior P(Fuse = 1 | Burning = 1, Paper_Jam = 1, Wrinkled = 0).

    Uses Bayes' rule with the CPTs learned from the module-level `table`,
    marginalising Roller out of P(Paper_Jam | Fuse) and Paper out of
    P(Wrinkled | Fuse); the three symptoms are treated as conditionally
    independent given Fuse.
    """
    # Likelihood terms P(symptom | Fuse = 1).
    prob_burning_given_fuse = learn_prob_with_one_parent(table['burning'], table['fuse'])[1][0]
    prob_paper_jam_given_fuse = learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[2][0]*(1-learn_prob(table['roller'])) + learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[3][0]*learn_prob(table['roller'])
    # Wrinkled is observed as 0, hence the complement (1 - P(Wrinkled=1 | Fuse=1)).
    prob_wrinkled_given_fuse = 1 - (learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[2][0]*(1-learn_prob(table['paper'])) + learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[3][0]*learn_prob(table['paper']))
    likelihood = prob_burning_given_fuse * prob_paper_jam_given_fuse * prob_wrinkled_given_fuse
    prior = learn_prob(table['fuse'])
    # Marginal probabilities of each piece of evidence (summed over Fuse and
    # the respective second parent).
    prob_burning = learn_prob_with_one_parent(table['burning'], table['fuse'])[0][0]*(1-learn_prob(table['fuse'])) + learn_prob_with_one_parent(table['burning'], table['fuse'])[1][0]*learn_prob(table['fuse'])
    prob_paper_jam = learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[2][0]*(1-learn_prob(table['roller']))*learn_prob(table['fuse']) + learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[3][0]*learn_prob(table['roller'])*learn_prob(table['fuse']) + learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[0][0]*(1-learn_prob(table['roller']))*(1-learn_prob(table['fuse'])) + learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller'])[1][0]*learn_prob(table['roller'])*(1-learn_prob(table['fuse']))
    prob_wrinkled = 1 - (learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[0][0]*(1-learn_prob(table['paper']))*(1-learn_prob(table['fuse'])) + learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[1][0]*learn_prob(table['paper'])*(1-learn_prob(table['fuse'])) + learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[2][0]*(1-learn_prob(table['paper']))*learn_prob(table['fuse']) + learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper'])[3][0]*learn_prob(table['paper'])*learn_prob(table['fuse']))
    # Evidence assumes the three symptoms are independent (same structure as
    # the likelihood), so the posterior is likelihood * prior / evidence.
    evidence = prob_burning * prob_paper_jam * prob_wrinkled
    return (likelihood * prior) / evidence
def _print_table(header, table):
    """Pretty-print a learned CPT as a fixed-width ASCII table.

    NOTE: Python 2 print-statement syntax.  Rows that are bare floats (the
    0.0 fallback for unobserved parent combinations) would raise a TypeError
    when unpacked by format(*row) — confirm upstream never passes them.
    """
    print "-" * 10 * len(header)
    # One right-aligned 10-character column per header entry.
    row_format = "{:>10}" * len(header)
    print row_format.format(*header)
    print "-" * 10 * len(header)
    for row in table:
        print row_format.format(*row)
    print "-" * 10 * len(header)
    print "\n"
if __name__ == '__main__':
    # Part 1: maximum-likelihood parameter estimates for the printer network.
    print('\nPart 1\n')
    print('P(Fuse=1): ' + str(learn_prob(table['fuse'])))
    print('P(Drum=1): ' + str(learn_prob(table['drum'])))
    print('P(Toner=1): ' + str(learn_prob(table['toner'])))
    print('P(Paper=1): ' + str(learn_prob(table['paper'])))
    print('P(Roller=1): ' + str(learn_prob(table['roller'])) + '\n')
    print('P(Burning | Fuse):')
    _print_table(('Burning=1', 'Fuse'), learn_prob_with_one_parent(table['burning'], table['fuse']))
    print('P(Quality | Drum, Toner, Paper):')
    _print_table(('Quality=1', 'Drum', 'Toner', 'Paper'), learn_prob_with_three_parents(table['quality'], table['drum'], table['toner'], table['paper']))
    print('P(Wrinkled | Fuse, Paper):')
    _print_table(('Wrinkled=1', 'Fuse', 'Paper'), learn_prob_with_two_parents(table['wrinkled'], table['fuse'], table['paper']))
    print('P(Mult_Pages | Paper, Roller):')
    _print_table(('P(M_P=1)', 'Paper', 'Roller'), learn_prob_with_two_parents(table['mult_pages'], table['paper'], table['roller']))
    print('P(Paper_Jam | Fuse, Roller):')
    _print_table(('P(P_J=1)', 'Fuse', 'Roller'), learn_prob_with_two_parents(table['paper_jam'], table['fuse'], table['roller']))
    # Part 2: posterior inference for the fuse given the observed symptoms.
    print('\nPart 2\n')
    print('P(Fuse=1 | Burning=Paper_Jam=1, Wrinkled=0):')
    print(prob_fuse_given_evidence())
juansdev/pytikzgenerate | 16,260,746,189,327 | 2a93315ef581299cbdc43a7a1b298a7b85219a25 | 3232871ea1a1a3be3f6e16d4c2257aa65d03e6af | /pytikzgenerate/modulos/submodulos/base_pytikz/__init__.py | 549c086dbb10b3eda3d6ae9651fada18a158309d | [
"MIT"
]
| permissive | https://github.com/juansdev/pytikzgenerate | 7ccaf5cd9be837d7797526183f354a521a8657a8 | 42e32ca7c3404c27b1f60299772cf3e4fb0d02f7 | refs/heads/master | 2023-08-02T04:21:20.684269 | 2021-09-21T01:04:00 | 2021-09-21T01:04:00 | 407,717,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class BasePytikz():
"""Clase padre, que tiene los atributos actualmente compatibles con Pytikz.
Atributos:
- COMANDOS_ANIDADORES(List[str]), tiene el nombre de los comandos anidadodres.
- DELIMITADORES_PARAMETROS_COMANDO_PERSONALIZADO(List[str]), tiene los delimitadores del valor de los parametros. se utiliza en el caso de que se tratase de un estilo con delimitadores, por ejemplo en el caso de un color RGB que tenga como delimitador "#7!70!black", solo se extrae el "#7" y posteriormente se devuelve solo el "7". Actualmente solo utilizado en los parametros de un comando personalizado invocado.
"""
#Utilizados en la clase "DepuradorTikz"
COMANDOS_ANIDADORES = ("foreach", "newcommand","animarPytikz","guardarPytikz")
DELIMITADORES_PARAMETROS_COMANDO_PERSONALIZADO = ("!")
#Utilizados en la clase "ValidadorPytikz"
COMANDOS_DIBUJAR_PYTIKZ = ("draw","fill","filldraw","shade")
COMANDOS_VARIABLES_PYTIKZ = ("tikzset","definecolor","foreach","animarPytikz","guardarPytikz")
#Utilizados en la clase "Validadores"
TIPO_DE_LINEAS_VALIDOS = ("dotted")
COLORES_VALIDOS = ("red","blue","green","cyan","black","yellow","white","purple")
ESTILOS_VALIDOS = ("help lines")
METRICA_VALIDOS = ("pt","cm")
#Todos los nombres de las llaves de parametro - Agregar, todos los entornos actualmente disponible para las variables... | UTF-8 | Python | false | false | 1,385 | py | 22 | __init__.py | 19 | 0.726354 | 0.722744 | 0 | 19 | 71.947368 | 418 |
SurfaceGentleman/Python | 12,652,973,668,148 | 47c30a1cf03f842cb99779223c21285dfb5e1bcf | e729c23dd15b7353159590bdf11c1c972cd365ee | /urllib/urllib的基本使用/cookies.py | 2a292293e659d49074a46887eb8bdbb8aeef60d4 | []
| no_license | https://github.com/SurfaceGentleman/Python | 1809796728626ab104747853f91b4aa457ede9d4 | 2b635ab9d8a2543c6b4a0273189c3d80e358f7e0 | refs/heads/master | 2020-03-20T05:52:28.924649 | 2018-10-19T08:34:06 | 2018-10-19T08:34:06 | 137,229,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.parse
import urllib.request
url = "https://user.qzone.qq.com/1226568176/main"
header = {
"Host": "user.qzone.qq.com",
"Connection": "keep-alive",
#"Cache-Control": "max-age=0",
#Upgrade-Insecure-Requests: 1
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36",
#Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
"Referer": "https://user.qzone.qq.com/1226568176",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cookie": "pgv_pvi=3164922880; RK=K4Jgn5Yha7; ptcz=42cc9f98ec478cc96d02159711f687e7a0ef8966e0ec2e38d89130cb223372ba; pt2gguin=o1226568176; pgv_pvid=2238499020; pgv_flv=-; _qpsvr_localtk=0.7295511567418862; pgv_si=s1861088256; pgv_info=ssid=s9155420505; uin=o1226568176; skey=@ck7jROcGc; ptisp=cm; p_uin=o1226568176; pt4_token=M7pWoQfX2SyJqXvzfQOYwqiNJReiCWilqUlau*I7K-g_; p_skey=Zg8DLr0pS7aYb9QsQoGsEFvMEBXaCJZUacGf5*vdaZo_; Loading=Yes; qz_screen=1920x1080; scstat=15; QZ_FE_WEBP_SUPPORT=1; cpu_performance_v8=0"
}
request = urllib.request.Request(url, headers=header)
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))
| UTF-8 | Python | false | false | 1,251 | py | 41 | cookies.py | 41 | 0.728217 | 0.557954 | 0 | 22 | 54.863636 | 517 |
vahidri/os-exer | 8,641,474,219,610 | f818ee70e7f36b7063b4ebafbad2dc9330b5b247 | 1b51b4c734110703f9f56f65b53ce3d2ba733fad | /Q2--mutex-h2o.py | 2d2f556093057254066ef7326a2a334e6789dc41 | []
| no_license | https://github.com/vahidri/os-exer | 2988082b2f45a5465cb1cedb33fb39042c8f4e3d | eea5fae671a40348699e6cb3ca60dbe29dd738fb | refs/heads/master | 2022-04-07T09:01:14.057817 | 2019-12-06T22:11:24 | 2019-12-06T22:11:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##semaphores
mutex = 1
ox = 0 #oxygen count
hy = 0 #hyrdogen count
flag_ox = 0 #gets true/1 after taking 2 hydrogens
flag_hy = 0 #gets true/1 after taking 1 oxygen
def prepare():
if hy >= 2 and ox >= 1 :
hy -= 2
signal(flag_hy)
ox -= 1
signal(flag_ox)
def make_water():
wait(flag_hy) #take the 2 hydrogens
wait(flag_ox) #take the 1 oxygen
print ('water molecule') #make H2O
def oxygen():
wait(mutex)
ox += 1 #new oxygen
##make water
prepare()
signal(mutex)
make_water()
def hydrogen():
wait(mutex)
hy += 1
prepare()
signal(mutex)
make_water()
| UTF-8 | Python | false | false | 644 | py | 12 | Q2--mutex-h2o.py | 7 | 0.57764 | 0.549689 | 0 | 33 | 18.484848 | 49 |
DichromaticLB/filter | 2,052,994,377,946 | 436b12ba9cc4d0ce680a4acb6d44dca96ca31c14 | 437f7203eeeb9e84e7fb979b449c4fac8773fd80 | /py/testContent.py | eea9ebbd80909bbe561603b0bd8915c59fca72c5 | []
| no_license | https://github.com/DichromaticLB/filter | 649ece1566f68c5edcdd61a78a42517fb05f2d15 | 9c3b4e1d3b116a3cf9b595e738af033d70463b7e | refs/heads/master | 2018-02-16T16:47:29.496114 | 2017-05-04T22:11:21 | 2017-05-04T22:11:21 | 63,068,802 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scapy.all import *
from testCommon import *
def swchar(x):
if chance(2):
if x.islower():
return x.upper()
else:
return x.lower()
return x
def caseAlter(st):
y=st
while st!="" and st==y:
y=reduce(lambda x,y:x+y,map(lambda x:swchar(x),list(st)))
return y
def strNoContains(leng,strtoavoid):
res=randChar(leng)
while res.find(strtoavoid) !=-1:
res=randChar(leng)
return res
def isimpleContent(p,n=False):
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,5)))
if not n:
if chance(2):
val=randChar()
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\"; "%(val)
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,150)))
else:
val=randChar()
while p["packet"].build().find(val) != -1:
val=randChar()
break
p["rule"]=p["rule"]+ " content:!\"%s\"; "%(val)
else:
if chance(2):
val=randChar()
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:!\"%s\"; "%(val)
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,150)))
else:
val=randChar()
while p["packet"].build().find(val) != -1:
val=randChar()
break
p["rule"]=p["rule"]+ " content:\"%s\"; "%(val)
def isimpleCaseContent(p,n=False):
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,5)))
if not n:
val=randChar()
valCase=caseAlter(val)
p["packet"]=p["packet"]/Raw(valCase)
p["rule"]= p["rule"]+ " content:\"%s\";nocase; "%(val)
p["packet"]=p["packet"]/Raw(strNoContains(random.randint(0,150),valCase))
else:
val=randChar()
valCase=caseAlter(val)
p["packet"]=p["packet"]/Raw(valCase)
p["rule"]= p["rule"]+ " content:\"%s\"; "%(val)
p["packet"]=p["packet"]/Raw(strNoContains(random.randint(0,150),valCase))
def itestDepthConent(p,n=False):
depth=random.randint(30,50)
leng=random.randint(10,20)
if not n:
if chance(2):
val=randChar(depth-leng)
p["packet"]=p["packet"]/Raw(val)
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\";depth:%d; "%(val,depth)
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,150)))
else:
val=randChar(depth-leng+random.randint(1,leng))
p["packet"]=p["packet"]/Raw(val)
val=randChar(leng)
while p["packet"].build().find(val) != -1:
val=randChar(leng)
break
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:!\"%s\";depth:%d; "%(val,depth)
else:
if chance(2):
val=randChar(depth-leng+random.randint(1,leng))
p["packet"]=p["packet"]/Raw(val)
val=randChar(leng)
while p["packet"].build().find(val) != -1:
val=randChar(leng)
break
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\";depth:%d; "%(val,depth)
else:
val=randChar(depth-leng)
p["packet"]=p["packet"]/Raw(val)
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:!\"%s\";depth:%d; "%(val,depth)
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,150)))
def itestOffsetConent(p,n=False):
offset=random.randint(30,50)
leng=random.randint(10,20)
if not n:
val=randChar(offset+random.randint(0,4))
p["packet"]=p["packet"]/Raw(val)
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\";offset:%d; "%(val,offset)
p["packet"]=p["packet"]/Raw(randChar(random.randint(0,150)))
else:
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
berg=strNoContains(offset*3,val)
p["packet"]=p["packet"]/Raw(berg)
p["rule"]=p["rule"]+ " content:\"%s\";offset:%d; "%(val,offset)
def itestIsDataAt(p,n=False):
leng=random.randint(1,10)
leng2=random.randint(1,10)
if not n:
if chance(2):
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["packet"]=p["packet"]/Raw(randChar(leng2))
p["rule"]=p["rule"]+ " content:\"%s\";isdataat:%d; "%(val,leng+leng2)
else:
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\";isdataat:!%d; "%(val,leng+1)
else:
if chance(2):
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["packet"]=p["packet"]/Raw(randChar(leng2))
p["rule"]=p["rule"]+ " content:\"%s\";isdataat:%d; "%(val,leng+leng2+1)
else:
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\";isdataat:!%d; "%(val,leng)
def chainDistance(p,ch,bad):
pad=random.randint(0,5)
p["packet"]=p["packet"]/Raw(randChar(pad))
res=random.randint(10,20)
st=randChar(res)
p["packet"]=p["packet"]/Raw(st)
if not bad:
p["rule"]=p["rule"]+ " content:\"%s\";distance:%d; "%(st,pad)
else:
p["rule"]=p["rule"]+ " content:\"%s\";distance:%d; "%(st,pad+res/2)
if chance(ch):
chainDistance(p,ch+1,bad)
def itestDistanceContent(p,n=False):
leng=random.randint(1,20)
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\"; "%(val)
if not n:
chainDistance(p, 1,False)
else:
chainDistance(p, 1,True)
def chainWithin(p,ch,bad):
pad=random.randint(0,5)
p["packet"]=p["packet"]/Raw(randChar(pad))
res=random.randint(10,20)
st=randChar(res)
p["packet"]=p["packet"]/Raw(st)
if not bad:
p["rule"]=p["rule"]+ " content:\"%s\";within:%d; "%(st,pad+res)
else:
p["rule"]=p["rule"]+ " content:\"%s\";within:%d; "%(st,pad+res/2)
if chance(ch):
chainWithin(p,ch+1,bad)
def itestWithinContent(p,n=False):
leng=random.randint(1,20)
val=randChar(leng)
p["packet"]=p["packet"]/Raw(val)
p["rule"]=p["rule"]+ " content:\"%s\"; "%(val)
if not n:
chainWithin(p, 1,False)
else:
chainWithin(p, 1,True)
def testContent(log=True):
if os.path.isfile("debugo"):
os.remove("debugo")
data=randPacket()
posibilities=[itestIsDataAt,itestOffsetConent,itestDepthConent,isimpleCaseContent,isimpleContent,itestDistanceContent,itestWithinContent]
gn=chance(2)
arrayRand(posibilities,False)(data,gn)
data["rule"]=data["rule"] +" )"
packetTest(data["packet"],data["rule"],gn,"rulePY","packetPY",log)
return data;
if __name__ == "__main__" and "execute" in sys.argv:
log=False
tries=10
if(len(sys.argv)>1 and sys.argv[1]=="log"):
log=True
if(len(sys.argv)>2):
tries=int(sys.argv[2])
while(tries >0):
testContent(log)
tries=tries-1
print "Done"
| UTF-8 | Python | false | false | 6,276 | py | 62 | testContent.py | 6 | 0.608827 | 0.590822 | 0 | 225 | 26.893333 | 141 |
javsolgar/trabajo-AII | 14,181,982,051,728 | 68e00e51f7790f1b7ee62b312da256f18f81860d | b0f47cc31e9ac302bc79651aac3cff82392ec2ed | /game/views.py | 332506ff41d24b1335d7d9e5dcdb62ba5646c32f | []
| no_license | https://github.com/javsolgar/trabajo-AII | 4bc2f31ab7ae30562bcb147634dfbf6c985ef544 | e25b858f83f76e22335c7e47ec59b6555be2f10e | refs/heads/main | 2023-03-23T22:55:10.581382 | 2021-03-19T17:14:22 | 2021-03-19T17:14:22 | 323,925,633 | 0 | 0 | null | false | 2021-03-19T17:14:23 | 2020-12-23T14:37:54 | 2021-01-16T13:11:53 | 2021-03-19T17:14:22 | 3,199 | 0 | 0 | 0 | Python | false | false | from django.shortcuts import render
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
from application.decorators import is_admin
from game.scrap_games import descarga_juegos
index_news = './indices/IndexNewsGames'
index_games = './indices/IndexGames'
@is_admin
def scrap_games(request):
descarga_juegos(index_games, index_news)
ix = open_dir(index_games)
with ix.searcher() as searcher:
cantidad = searcher.doc_count_all()
return render(request, 'admin/scrap.html', {'cantidad': cantidad, 'elemento': 'juegos'})
def list_games(request):
ix = open_dir(index_games)
res = []
with ix.searcher() as searcher:
cantidad = searcher.doc_count()
juegos = searcher.documents()
for juego in juegos:
url_imagen = juego['url_imagen']
titulo = juego['titulo']
res.append([url_imagen, titulo])
ix.close()
return render(request, 'game/list.html', {'juegos': res, 'cantidad': cantidad})
def show_game(request, game_title):
res = []
ix = open_dir(index_games)
with ix.searcher() as searcher:
query = QueryParser('titulo', ix.schema).parse(game_title)
result = searcher.search(query)
for juego in result:
res = [juego['titulo'],
juego['plataformas'].replace(',', ', '),
juego['desarrollador'],
juego['generos'].replace(',', ', '),
juego['url_juego'],
juego['jugadores'],
juego['url_imagen']]
ix.close()
return render(request, 'game/show.html', {'juego': res})
def buscar_juegos_titulo(request):
return render(request, 'game/filtro.html', {'filtro': 'titulo'})
def list_plataformas(request):
res = []
ix = open_dir(index_games)
with ix.searcher() as searcher:
juegos = searcher.documents()
for juego in juegos:
plataformas = juego['plataformas'].split(',')
for plataforma in plataformas:
if plataforma not in res:
res.append(plataforma)
ix.close()
return render(request, 'game/filtro.html', {'opciones': sorted(res), 'filtro': 'plataformas'})
def list_generos(request):
res = []
ix = open_dir(index_games)
with ix.searcher() as searcher:
juegos = searcher.documents()
for juego in juegos:
generos = juego['generos'].split(',')
for genero in generos:
if genero not in res:
res.append(genero)
ix.close()
return render(request, 'game/filtro.html', {'opciones': sorted(res), 'filtro': 'generos'})
def list_games_filtrados(request):
res = []
ix = open_dir(index_games)
respuesta_formulario = request.GET.get('select_filtro')
if '_' in respuesta_formulario:
filtro, valor = respuesta_formulario.split('_')
else:
filtro = 'titulo'
valor = respuesta_formulario
with ix.searcher() as searcher:
query = QueryParser(filtro, ix.schema).parse(valor)
juegos = searcher.search(query)
for juego in juegos:
url_imagen = juego['url_imagen']
titulo = juego['titulo']
res.append([url_imagen, titulo])
ix.close()
return render(request, 'game/list.html', {'juegos': res, 'cantidad': len(res)})
| UTF-8 | Python | false | false | 3,394 | py | 41 | views.py | 23 | 0.594579 | 0.594579 | 0 | 118 | 27.762712 | 98 |
lincolnsotto/Python | 2,070,174,285,382 | fc8c159e3e12a529a79d24db94750bd2f520d4c9 | e4a13677811a06bdbdc269fa46351454a53e1a88 | /04_Livro_Automatizando_tarefas_macantes_com_python/cap_5_dictionary.py | 81d8214ebb5278fe933db3a909796197def1b454 | []
| no_license | https://github.com/lincolnsotto/Python | 9e11fd18146ea6305db01720d6b4082fe8c7faaf | d6da13d9a2ec0c44b688513b57c0a4c57c763abe | refs/heads/master | 2021-06-24T20:34:38.483479 | 2020-12-08T02:13:20 | 2020-12-08T02:13:20 | 181,244,283 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
#
# myCat = {"size": "fat", "color": "gray", "disposition": "loud"}
# print(myCat)
# print(myCat["size"])
# print("My cat has " + myCat["color"] + " fun")
##################################################################
#
# myDog = {1: "Little", 2: "Black"}
# print(myDog)
# print(myDog[1])
# print(myDog[2])
##################################################################
#
# spam = ["a", "b"]
# spam2 = ["b", "a"]
# print(spam == spam2)
#
# spam = {1: "a", 2: "b"}
# spam2 = {2: "b", 1: "a"}
# print(spam == spam2)
##################################################################
#
# It put friend's birthdays in dictionary
#
# birthdays = {"lincoln": "dec 20", "fabiana": "fev 02"}
#
# while True:
# print("Enter a name: (blank to quit)")
# name = input()
# if name == "":
# break
#
# if name in birthdays:
# print(birthdays[name] + " is the birthday of " + name)
# else:
# print("I don't have birthday information for " + name)
# print("What is their birthday? ")
# bday = input()
# birthdays[name] = bday
# print("birthday data base update")
# print(birthdays)
##################################################################
#
# spam = {"color": "red", "age": "42"}
# for i in spam.values():
# print(i)
# for i in spam.keys():
# print(i)
# for i in spam.items():
# print(i)
##################################################################
#
# spam = {"color": "red", "age": "42"}
# print(list(spam.values()))
# or use keys
# or use items
##################################################################
#
# spam = {"color": "red", "age": "42"}
# print(spam.keys())
# print(list(spam.keys()))
##################################################################
#
# spam = {"color": "red", "age": 42}
# for k, v in spam.items():
# print("key:" + k + "Value:" + str(v))
##################################################################
# KEYS(), VALUES(), ITEMS() #
# spam = {"color": "red", "age": 42}
# print("color" in spam.keys())
# print("red" in spam.values())
# print("age" in spam)
# print("color" in spam)
##################################################################
# GET() #
# picnicItems = {"apples": 5, "cups": 2}
# print("I'm bringing " + str(picnicItems.get("cups", 0)) + " cups")
# print("I'm bringing " + str(picnicItems.get("eggs", 2)) + " eggs")
# print(picnicItems)
##################################################################
# SETDEFAULT() #
# spam = {"name": "lincoln", "age": 28}
# if "color" not in spam:
# spam.setdefault("color", "red")
# print(spam)
# print(spam["color"])
# print("red" in spam.values())
##################################################################
# import pprint
#
# message = "Texto de exemplo para contagem de caracteres."
# emptydict = {}
#
# for character in message:
# emptydict.setdefault(character, 0)
# emptydict[character] = emptydict[character] + 1
# pprint.pprint(emptydict)
##################################################################
# LET`S PLAY GAME
# Criando um dicionário com as posições.
# theBoard = {"top-l": " ", "top-m": " ", "top-r": " ",
# "mid-l": " ", "mid-m": " ", "mid-r": " ",
# "low-l": " ", "low-m": " ", "low-r": " "}
# print(theBoard)
# Criando uma função que representará uma lista no formato do tabuleiro
# de chadrez
# def printBoard(board):
# print(board["top-l"] + "|" + board["top-m"] + "|" + board["top-r"])
# print("-+-+-")
# print(board["mid-l"] + "|" + board["mid-m"] + "|" + board["mid-r"])
# print("-+-+-")
# print(board["low-l"] + "|" + board["low-m"] + "|" + board["low-r"])
#
#
# turn = "X"
# for i in range(9):
# printBoard(theBoard)
# print("Turn for " + turn + ". Move on which space?")
# move = input()
# theBoard[move] = turn
# if turn == "X":
# turn = "0"
# else:
# turn = "X"
#
# printBoard(theBoard)
##################################################################
#
# allGuests = {"Aline": {"apples": 5, "pretzels": 12},
# "Bob": {"ham": 3, "apples": 2},
# "Carol": {"cups": 3, "apple pie": 1}}
#
#
# def totalBrought(guests, item):
# numbrought = 0
# for k, v in guests.items():
# numbrought = numbrought + v.get(item, 0)
# return numbrought
#
# print("Number of things being brought:")
# print("Apples: "+str(totalBrought(allGuests, "apples")))
# print("pretzels: "+str(totalBrought(allGuests, "pretzels")))
# print("ham: "+str(totalBrought(allGuests, "ham")))
# print("cups: "+str(totalBrought(allGuests, "cups")))
# print("apple pie: "+str(totalBrought(allGuests, "apple pie")))
##################################################################
| UTF-8 | Python | false | false | 4,780 | py | 19 | cap_5_dictionary.py | 11 | 0.435484 | 0.425848 | 0 | 167 | 27.586826 | 73 |
n43jl/mdp | 2,714,419,376,823 | 3de10e3c24e613f8c8a009b06d0355674275223e | 4e49b83d1615fc3edc4f552a1c909a8c31f89f68 | /convert_so_users_xml.py | fabbb819a2d23d5dcebbd54b58a3221d8d584675 | [
"MIT"
]
| permissive | https://github.com/n43jl/mdp | f6582aea48085e3ba296040a57c1db8d06d17211 | f12013436e60f0d96b50810741a12609d01ef31e | refs/heads/master | 2021-01-19T07:30:10.458582 | 2014-10-08T08:58:12 | 2014-10-08T08:58:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import csv
from mdp_util import get_int, get_string
answers = {}
LAST_ID = 3600581
with open('Users.xml') as input_file:
# with open('users.xml') as input_file:
with open('users.csv', 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=' ')
for line in input_file:
id = get_int(line, 'Id="')
reputation = get_int(line, 'Reputation="')
writer.writerow([id, reputation])
if id:
print 'id: {0}; {1:.2f}%'.format(id, 100 * float(id)/ LAST_ID)
csv_file.close()
input_file.close() | UTF-8 | Python | false | false | 524 | py | 21 | convert_so_users_xml.py | 20 | 0.64313 | 0.618321 | 0 | 19 | 26.631579 | 66 |
LexGalante/Python.Lang | 1,005,022,374,521 | 0ff6597fbefc7301585f816985187c62cf00d60e | 5e35246c55b20768f80f46da2b1e626e088b51c9 | /colecoes/alta_performance.py | dd402a526d5b5378a1215b2051c7b2a6ea2db024 | []
| no_license | https://github.com/LexGalante/Python.Lang | c73dbeb8f75b7f33ac6065672927de85c87d28b7 | 1b4137565d420d9df002f1950b84646180e2d873 | refs/heads/master | 2022-07-03T07:20:36.797468 | 2020-05-10T03:15:09 | 2020-05-10T03:15:09 | 258,381,988 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Lista de alta performance
Coloca os valores como chave e o os valores são o numero de vezes que o mesmo aparece
"""
from collections import Counter
from collections import OrderedDict
from collections import defaultdict
from collections import namedtuple
print("counter")
lista = [1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 6, 7, 8, 9]
counter = Counter(lista)
print(dir(lista))
print(lista)
print(dir(counter))
print(counter)
counter = Counter("Alex Volnei Galante")
print(dir(counter))
print(counter)
print(counter.most_common(3))
print("defaultdict")
dicionario = defaultdict(lambda: "testando")
print(type(dicionario))
print(dir(dicionario))
dicionario["teste"] = "testando"
print(dicionario["nao_existess"])
print("ordereddict")
dicionario = {'a': 1, 'b': 2, 'd': 3, 'z': 4, 'c': 5}
for chave, valor in dicionario.items():
print(f"{chave} {valor}")
dicionario = OrderedDict({'a': 1, 'b': 2, 'd': 3, 'z': 4, 'c': 5})
for chave, valor in dicionario.items():
print(f"{chave} {valor}")
print("namedtuple")
tupla_nomeada = namedtuple('teste', 'item1 item2 item3')
tupla_nomeada = namedtuple('teste', 'item1, item2, item3')
tupla_nomeada = namedtuple('teste', ['item1', 'item2', 'item3'])
primeiro = tupla_nomeada(item1="teste", item2="teste", item3="teste")
print(primeiro)
| UTF-8 | Python | false | false | 1,289 | py | 38 | alta_performance.py | 36 | 0.695652 | 0.665373 | 0 | 47 | 26.404255 | 85 |
biroc/Olympic-Games-Demo | 2,001,454,808,417 | d0059f6a771eb4e31e44ebfd06d144656bc21ab3 | 61a0f515605b1dcd3cb8575c0896f3c33f7a0589 | /load_entityset.py | 11eaf5682ba98f3c5c8cb2f461dde9804ea14ea6 | []
| no_license | https://github.com/biroc/Olympic-Games-Demo | abe14c440d549246995b07b7af4f679500ae176d | 4d27696fe9869c4998d6af40d548cd878b2ef959 | refs/heads/master | 2021-07-20T15:30:02.202542 | 2017-10-26T14:30:47 | 2017-10-26T14:30:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import featuretools as ft
import os
def load_entityset(data_dir='~/olympic_games_data',
with_econ_data=False,
with_region_data=False,
countries_known_for_subsequent_games=False,
econ_path='~/olympic_games_data/economic_data/',
region_path='~/olympic_games_data/economic_data/',
since_date=None):
'''
1. Load data on each medal won at every summer Olympic Games
2. Load data about each country that won at least one medal through Olympic history
3. Do some formatting
4. Sort on Year
5. Add in a column representing a unique row for each Olympic Games
6. Separate team medals from individual medals
7. Create Featuretools EntitySet
olympic_games (regions)
| |
| countries __________________________
| | sports \
countries_at_olympic_games | (economic_indicators)
| disciplines
medals_won____________/
| athletes
medaling_athletes ____________________/
# do a bit more probing on analysis in simple version
# Clean up
'''
# Step 1
summer = pd.read_csv(
os.path.join(data_dir, 'summer.csv'), encoding='utf-8')
# winter = pd.read_csv(os.path.join(data_dir, 'winter.csv'), encoding='utf-8')
# Step 2
countries = pd.read_csv(
os.path.join(data_dir, 'dictionary.csv'), encoding='utf-8')
countries.drop(['GDP per Capita', 'Population'], axis=1, inplace=True)
# Some countries had a '*" at their end, which we need to remove
# in order to match with economic data
countries['Country'] = countries['Country'].str.replace('*', '')
countries = countries.append(
pd.DataFrame({
'Country': ['Unknown', 'Mixed Team'],
'Code': ['UNK', 'ZZX']
}),
ignore_index=True)
# Step 3
# Make names First Last instead of Last, First?
# These two lines were taken from https://www.kaggle.com/ash316/great-olympians-eda/notebook
summer['Athlete'] = summer['Athlete'].str.split(', ').str[::-1].str.join(' ')
summer['Athlete'] = summer['Athlete'].str.title()
# winter['Athlete']=winter['Athlete'].str.split(', ').str[::-1].str.join(' ')
# winter['Athlete']=winter['Athlete'].str.title()
summer['Year'] = (pd.to_datetime(summer['Year'], format="%Y") +
pd.offsets.MonthEnd(6)).dt.date
# winter['Year'] = (pd.to_datetime(winter['Year'], format="%Y")).dt.date
# Step 4
# summer['Games Type'] = 'Summer'
# winter['Games Type'] = 'Winter'
# medals_won = pd.concat([summer, winter]).sort_values(['Year'])
medals_won = summer.sort_values(['Year'])
if since_date is not None:
medals_won = medals_won[medals_won['Year'] >= since_date]
# Step 5
medals_won['Olympic Games Name'] = medals_won['City'].str.cat(
medals_won['Year'].astype(str), sep=' ')
medals_won['Country'].fillna("UNK", inplace=True)
medals_won['Olympic Games ID'] = medals_won[
'Olympic Games Name'].factorize()[0]
medals_won['Country'].fillna("UNK", inplace=True)
medals_won['Country Olympic ID'] = medals_won['Country'].str.cat(
medals_won['Olympic Games ID'].astype(str)).factorize()[0]
# Step 6
unique_cols = ['Year', 'Discipline', 'Event', 'Medal']
new_medals_won = medals_won.drop_duplicates(unique_cols, keep='first').reset_index(drop=True)
new_medals_won['medal_id'] = new_medals_won.index
athletes_at_olympic_games = medals_won.merge(new_medals_won[unique_cols + ['medal_id']], on=unique_cols, how='left')
athletes_at_olympic_games = athletes_at_olympic_games[['Year', 'Athlete', 'Gender', 'medal_id']]
medals_won = new_medals_won[[c for c in new_medals_won if c not in ['Athlete', 'Gender']]]
athletes_at_olympic_games['Athlete Medal ID'] = athletes_at_olympic_games['Athlete'].str.cat(
athletes_at_olympic_games['medal_id'].astype(str)).factorize()[0]
# There were 2 duplicate athlete entries in the data, get rid of them
athletes_at_olympic_games.drop_duplicates(['Athlete Medal ID'], inplace=True)
# Step 7
es = ft.EntitySet("Olympic Games")
es.entity_from_dataframe(
"medaling_athletes",
athletes_at_olympic_games,
index="Athlete Medal ID",
time_index='Year')
es.entity_from_dataframe(
"medals_won",
medals_won,
index="medal_id",
time_index='Year')
es.normalize_entity(
base_entity_id="medaling_athletes",
new_entity_id="athletes",
index="Athlete",
make_time_index=True,
new_entity_time_index='Year of First Medal',
additional_variables=['Gender'])
es.normalize_entity(
base_entity_id="medals_won",
new_entity_id="countries_at_olympic_games",
index="Country Olympic ID",
make_time_index=True,
new_entity_time_index='Year',
additional_variables=[
'City', 'Olympic Games Name', 'Olympic Games ID', 'Country'
])
es.normalize_entity(
base_entity_id="countries_at_olympic_games",
new_entity_id="olympic_games",
index="Olympic Games ID",
make_time_index=False,
copy_variables=['Year'],
additional_variables=['City'])
es.normalize_entity(
base_entity_id="medals_won",
new_entity_id="disciplines",
index="Discipline",
new_entity_time_index='Debut Year',
additional_variables=['Sport'])
es.normalize_entity(
base_entity_id="disciplines",
new_entity_id="sports",
new_entity_time_index='Debut Year',
index="Sport")
# map countries in medals_won to those in countries
mapping = pd.DataFrame.from_records(
[
('BOH', 'AUT', 'Bohemia'),
('ANZ', 'AUS', 'Australasia'),
('TTO', 'TRI', 'Trinidad and Tobago'),
('RU1', 'RUS', 'Russian Empire'),
('TCH', 'CZE', 'Czechoslovakia'),
('ROU', 'ROM', 'Romania'),
('YUG', 'SCG', 'Yugoslavia'),
('URS', 'RUS', 'Soviet Union'),
('EUA', 'GER', 'United Team of Germany'),
('BWI', 'ANT', 'British West Indies'),
('GDR', 'GER', 'East Germany'),
('FRG', 'GER', 'West Germany'),
('EUN', 'RUS', 'Unified Team'),
('IOP', 'SCG', 'Yugoslavia'),
('SRB', 'SCG', 'Serbia'),
('MNE', 'SCG', 'Montenegro'),
('SGP', 'SIN', 'Singapore'),
],
columns=['NewCode', 'Code', 'Country'])
columns_to_pull_from_similar = [
u'Code', u'Subregion ID', u'Land Locked Developing Countries (LLDC)',
u'Least Developed Countries (LDC)',
u'Small Island Developing States (SIDS)',
u'Developed / Developing Countries', u'IncomeGroup'
]
similar_countries = mapping['Code']
similar = countries.loc[countries['Code'].isin(similar_countries),
columns_to_pull_from_similar]
similar = similar.merge(
mapping, on='Code', how='outer').drop(
['Code'], axis=1).rename(columns={'NewCode': 'Code'})
countries = countries.append(
similar, ignore_index=True).reset_index(drop=True)
es.entity_from_dataframe("countries", countries, index="Code")
relationships = [
ft.Relationship(es['countries']['Code'],
es['countries_at_olympic_games']['Country']),
ft.Relationship(es['medals_won']['medal_id'],
es['medaling_athletes']['medal_id']),
]
es.add_relationships(relationships)
if countries_known_for_subsequent_games:
es['countries_at_olympic_games'].df['Year'] -= pd.Timedelta('7 days')
es.add_interesting_values()
return es
| UTF-8 | Python | false | false | 7,976 | py | 7 | load_entityset.py | 3 | 0.57485 | 0.570963 | 0 | 198 | 39.267677 | 120 |
shiksb/IntroPython | 14,121,852,469,889 | 605af43c96f6ba1f506b635c439817912977a8ce | 2b44eda0146c075fb0b440693f6740b21f01583b | /homework/HW2/solutions/missing_letters.py | c8db57cdc0807f66fbc9569ce37400a42103336b | []
| no_license | https://github.com/shiksb/IntroPython | b4433fdd51f2cb65609902622bdd5ae5c95e2ec1 | 82915092cdcc9ce1887ca743486bbadcde109ac2 | refs/heads/master | 2021-01-20T12:16:21.155644 | 2017-02-21T07:17:40 | 2017-02-21T07:17:40 | 82,648,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def missing_letters(s):
"""
@type s: str
@param s: String
@rtype: List
@return: Returns a list of the letters not present in the string
"""
# Write your code below.
# every index in this list represents the index of a letter in the alphabet
s = s.lower()
letter_present = [False for i in range(26)]
for letter in s:
# use ASCII to find out the index of the current letter in the alphabet
if letter.isalpha():
index = ord(letter) - ord('a')
# mark that letter as being seen
letter_present[index] = True
result = []
for idx in range(len(letter_present)):
if not letter_present[idx]:
# convert back to letter
missing_letter = chr(idx + ord('a'))
result.append(missing_letter)
return result
if __name__ == "__main__":
test_string_1="The quick brown fox jumps over the lazy dog"
print("Input:", test_string_1)
print("Your output:", missing_letters(test_string_1))
print("Expected:", [])
test_string_2="The brown fox jumps over the dog"
print("Input:", test_string_2)
print("Your output:", missing_letters(test_string_2))
print("Expected:", ['a','c','i','k','l','q','y','z'])
| UTF-8 | Python | false | false | 1,263 | py | 54 | missing_letters.py | 53 | 0.590657 | 0.584323 | 0 | 42 | 29.071429 | 79 |
vapvin/DataStructure | 10,557,029,618,693 | 6f71994794a5bcd1feba90b8015b63c35b0996e4 | 2c9918cc1ec326cdb63ff3f1529612763ae83134 | /array.py | 272d5542b928455fecf80a25aebf36eda4beecc8 | []
| no_license | https://github.com/vapvin/DataStructure | f21f2e0c0ed834998ba641998f547c3933d834c7 | 629147537905ccf8d9d0d27e2fa0220903ab9dae | refs/heads/master | 2021-02-04T15:17:51.320934 | 2020-10-27T12:46:07 | 2020-10-27T12:46:07 | 243,679,789 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Array Structure
## The array represents the data structure consisting of numbers and corresponding data.
## 배열은 번호와 번호에 대응하는 데이터들로 이루어진 자료 구조를 나타낸다.
# ex) C language Arry
"""
#include <stdio.h>
int main(void){
char arr[20];
}
"""
# Python dynamically allocates the length of the arrangement.
# 파이썬에서는 배열의 길이를 동적할당해준다.
arr = 'array'
print(arr)
arr = arr + "is Good"
print(arr)
# First Dimension list Array
array = [1, 2, 3, 4, 5]
print(array)
# Two Dimensional list Array
array_second = [[1,2,3,], [4,5,6,], [7,8,9]]
print(array_second[0])
print(array_second[0][0])
# dataset Count Frequency
dataset = ['Mikda', "list Mk", "What", "Bain sister eMa", "this Hour", "Live Computing", "List Models", "Django Admin", "Wakastring", "However", "Apple", "Mongo DB", "Maria DB", "Graphql"]
count = 0
for data in dataset:
for index in range(len(data)):
if data[index] == 'M':
count += 1
print(count) | UTF-8 | Python | false | false | 1,032 | py | 5 | array.py | 3 | 0.650215 | 0.627682 | 0 | 45 | 19.733333 | 188 |
Lee651/python | 2,422,361,567,691 | 9b6d0ca838f8c66fa9dc87f997ee4480a01ed294 | 21eccc7180610e40a3d247d334029067b679700f | /chapter9/user_admin.py | 8b6de6b1a2e388da796520feba12e631dc5ae705 | []
| no_license | https://github.com/Lee651/python | d4aa581a5fab7216326e5bf2a7660743480e208a | f03039dfac829f7e5a9c8cfd5667d7f114c61d55 | refs/heads/master | 2022-11-12T01:39:55.399504 | 2020-07-05T08:00:18 | 2020-07-05T08:00:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class User():
def __init__(self, first_name, last_name, age, height, gender):
self.full_name = first_name + " " + last_name
self.Age = str(age)
self.Height = str(height)
self.Gender = gender
def discribe_user(self):
print("\nThe person's message are: ")
print("name: " + self.full_name.title() + "\nage: " + self.Age + "\nheight: " + self.Height + "\ngender: " + self.Gender)
def greet_user(self):
print("\nHello, " + self.full_name + "!")
class Admin(User):
def __init__(self, first_name, last_name, age, height, gender):
super().__init__(first_name, last_name, age, height, gender)
self.privileges = ["can add post", "can delete post", "can ban user"]
def show_privileges(self):
t = "The user has " + str(self.privileges) + " priviliges"
print(t)
user_1 = Admin("xiangfei", "ai", 28, 170, "man")
user_1.discribe_user()
user_1.show_privileges()
| UTF-8 | Python | false | false | 967 | py | 38 | user_admin.py | 36 | 0.577042 | 0.568769 | 0 | 27 | 34.703704 | 129 |
Evertcolombia/graphql_starwars_api_relay | 1,391,569,412,324 | e42b91000b3ffcbecb55d25b218fc10b0f191d3b | c73fce92757189af28e87969b29836fea4377206 | /Grapqhl_django_api/Grapqhl_django_api/api_project/migrations/0013_auto_20201025_1953.py | 5a52f24bd4ebef76a06ac3950e08c28ab9410fa9 | [
"MIT"
]
| permissive | https://github.com/Evertcolombia/graphql_starwars_api_relay | 4f3b9ae4c0b2da98da29c496d4622813ed49b88b | 47bb409e19a2905509618230d7b04c2d59aba7fe | refs/heads/master | 2023-01-06T04:50:46.175507 | 2020-10-28T14:06:31 | 2020-10-28T14:06:31 | 308,015,877 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-10-25 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_project', '0012_people_films'),
]
operations = [
migrations.AlterField(
model_name='people',
name='films',
field=models.ManyToManyField(related_name='films', to='api_project.Film'),
),
]
| UTF-8 | Python | false | false | 419 | py | 25 | 0013_auto_20201025_1953.py | 20 | 0.596659 | 0.551313 | 0 | 18 | 22.277778 | 86 |
kirkbozeman/instatwit_automation | 13,125,420,097,964 | fb6ef6d1d1f2e8129cee91fe145ce3790238e449 | 149a92ea003b0b39081ffc62d476d578454aeebd | /insta_twitter_automation.py | 4743626346425d9e3e0e73def8c357cbac04c743 | []
| no_license | https://github.com/kirkbozeman/instatwit_automation | b6166e95dfd029c1518ec0b34685b1dbfeca9578 | 8d6234bb2332c24e69faa07d9e4ab03e5c62bc4c | refs/heads/master | 2020-04-28T23:27:36.178322 | 2019-03-18T16:10:09 | 2019-03-18T16:10:09 | 175,655,794 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Quick script used to push Instapaper articles to Twitter and clean up folders
Uses oauth1 + xauth + tweepy
Posts to twitter account @ireadstufftoo several times a day from remote server
"""
from config import instatwit, instap
import datetime
import pyinstapaper.instapaper as icc
from pyinstapaper_fix import instapaper_move_to_folder
import random
import tweepy
import urllib.request
def GetBookmark():
# instapaper
i = icc.Instapaper(instap["OAuthConsumerID"], instap["OAuth_Consumer_Secret"])
i.login(instap["email"], instap["password"])
marks = i.get_bookmarks(folder=instap["Post"], limit=500, have=None) # get up to 500
# choose bookmark based on weighted random
mx = len(marks)
wt = list(range(1, 10))*20 + list(range(11, mx-1)) + [mx]*100 # random but heavily weighted 1-10 and last
c = random.choice(wt) - 1
return marks[c] # pull weighted random
def PostToTwitter(mark):
# twitter
auth = tweepy.OAuthHandler(instatwit["consumerKey"], instatwit["consumerSecret"])
auth.set_access_token(instatwit["accessToken"], instatwit["accessTokenSecret"])
t = tweepy.API(auth)
#remove XXX from title if present
if mark.url.find("www.youtube.com") >= 0 and mark.title.find(" - YouTube") >= 0:
url = mark.url.replace(" - YouTube", "")
elif mark.url.find("www.marketwatch.com") >= 0 and mark.title.find(" - MarketWatch") >= 0:
url = mark.url.replace(" - MarketWatch", "")
else:
url = mark.url
post = mark.title + ' ' + url # default post
# post first highlight available (if it fits)
for h in mark.get_highlights():
if len(mark.get_highlights()) == 0: # break if no highlights
break
elif len('"' + h.text + '" ') + 23 <= 280: # twitter gives all links as 23 chars
post = '"' + h.text + '" ' + url
break
t.update_status(post)
return None
def SortAndArchiveBookmark(mark):
try:
# get html of link
with urllib.request.urlopen(mark.url) as p:
byt = p.read()
html = byt.decode("utf8")
low = html.lower()
if any(x in low for x in ["s&p", "economy", "stock market", "finance"]):
instapaper_move_to_folder(mark.bookmark_id, instap["Financial"])
message = "Financial: " + mark.title + ' ' + mark.url
elif "millennial" in low:
instapaper_move_to_folder(mark.bookmark_id, instap["Millennials"])
message = "Millennials: " + mark.title + ' ' + mark.url
elif "music" in low:
instapaper_move_to_folder(mark.bookmark_id, instap["Music"])
message = "Music: " + mark.title + ' ' + mark.url
else:
instapaper_move_to_folder(mark.bookmark_id, instap["Misc"])
message = "Misc: " + mark.title + ' ' + mark.url
except:
mark.archive()
message = f""""
There was a problem with " + {mark.url} + '.'
Bookmark has been archived without post to Twitter.
"""
return message
if __name__ == "__main__":
ok = None
while ok is None:
try:
mark = GetBookmark()
ok = urllib.request.urlopen(mark.url) # don't post if fails here
PostToTwitter(mark)
except:
pass
finally:
result = SortAndArchiveBookmark(mark) # always sort/archive bookmark
with open("log.txt", "a+") as f: # instatwit/log.txt for site
utc = datetime.datetime.utcnow()
f.write("\n" + utc.strftime("%Y-%m-%d %H:%M:%S") + " " + result)
| UTF-8 | Python | false | false | 3,609 | py | 3 | insta_twitter_automation.py | 3 | 0.597395 | 0.587697 | 0 | 109 | 32.036697 | 110 |
nxtabb/HIS | 6,442,450,982,671 | ca421000d86328367f6da7e926883c3800b6b680 | f1c9bee2dd4f3ec3a6246e965e8e502d4ab9488b | /apps/user/models.py | b0c49bd0eeaf4838cbbd71d3749d8326a3750992 | []
| no_license | https://github.com/nxtabb/HIS | cd4a051fe5f39ec38fd0a8804ee8e6f91c2a0538 | 9089d4cfd2f4f7577ed8ea391bed98f6b7e5fa23 | refs/heads/master | 2022-07-16T19:25:58.375648 | 2020-05-19T10:03:11 | 2020-05-19T10:03:11 | 265,220,147 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
from db.base_model import BaseModel
class User(AbstractUser,BaseModel):
AUTHORITY = (
(1, '管理员'),
(2, '普通用户'),
(3, '审查员'),
)
authority = models.SmallIntegerField(default=2, choices=AUTHORITY,verbose_name='身份')
class Meta:
db_table = 'df_user'
verbose_name = '用户'
verbose_name_plural = verbose_name
| UTF-8 | Python | false | false | 495 | py | 12 | models.py | 7 | 0.605996 | 0.59743 | 0 | 15 | 28.6 | 88 |
3jackdaws/django-ws-starter | 13,932,873,914,486 | c11c03ea23a5a9abf002e79cff2c874446a6bd34 | 33e6699b17e73ee5814d2f50fa31df01a13dc8e9 | /app/http/views/common.py | 8442e39045b927a45034cd101c0011368eb2f5ec | [
"MIT"
]
| permissive | https://github.com/3jackdaws/django-ws-starter | aadb761bfc4d8c3eab4209ba6d34df7d5c8d22aa | 15f5bc6188037e8bbc54fdfaad6f0a4bedf2e7d7 | refs/heads/master | 2021-08-22T23:48:14.046383 | 2017-12-01T18:51:54 | 2017-12-01T18:51:54 | 112,769,795 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mimetypes
from django.http import HttpResponse, FileResponse
from django.shortcuts import render, redirect
from app.settings import STATIC_BASE, BASE_DIR
from django.contrib.auth import login, authenticate, logout
def index(request):
return render(request, 'app/index.html')
def static(request, path):
filepath = STATIC_BASE + path
with open(filepath, 'rb') as fp:
sometext = fp.read()
return FileResponse(sometext, content_type=mimetypes.guess_type(filepath)[0])
| UTF-8 | Python | false | false | 501 | py | 14 | common.py | 8 | 0.746507 | 0.744511 | 0 | 18 | 26.833333 | 81 |
cecidisi/comet_urank_project | 19,232,863,565,480 | e898804ad88217ae0586998de9528227ce18f56a | f1c104edfee91228ada5ab26bd1564f085d253d1 | /upmc/db_connector.py | 30fa31dcec83e3d12c7ff242af0c64147ae1a0de | []
| no_license | https://github.com/cecidisi/comet_urank_project | 8ffb447bc3b5037ca1c4f559e82f17dba39bb68f | 3fd4bc32ed50bea3ba664d11332302d02feddb7e | refs/heads/master | 2020-12-02T18:07:53.627178 | 2019-04-25T12:48:27 | 2019-04-25T12:48:27 | 96,477,664 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
# from conf_navigator.models import *
from .models import *
from .serializers import *
from helper.bcolors import *
import json
class DBconnector:
@classmethod
def get_articles(cls):
articles = Article.objects.all()
articles = ArticleSerializer.setup_eager_loading(articles)
return ArticleSerializer(articles, many=True).data
@classmethod
def get_positions_in_text(cls, id_list):
positions = PubmedDocKeywords.objects.filter(pk__in=id_list)
@classmethod
def get_keywords(cls):
keywords = PubmedGlobalKeyword.objects.all().order_by('-score')
return PubmedGlobalKeywordSerializer(keywords, many=True).data
@classmethod
def get_article_details(cls, doc_id):
article = Article.objects.get(pk = doc_id)
return ArticleFullSerializer(article).data
@classmethod
def get_keyphrases(cls, kw_id):
keyphrases = PubmedKeyphrase.objects.filter(global_keyword_id=kw_id, count__gt=1)[:30]
return KeyphraseSerilizer(keyphrases, many=True).data
@classmethod
def search_features(cls, feature_type, text):
if feature_type == 'keyword':
keywords = PubmedGlobalKeyword.objects.filter(term__contains=text)
print_blue(str(len(keywords)))
if len(keywords):
return PubmedGlobalKeywordSerializer(keywords, many=True).data
return []
@classmethod
def get_year_facets(cls):
year_facets = PubmedYearFacet.objects.all().order_by('year')
return PubmedYearFacetSerialzer(year_facets, many=True).data
| UTF-8 | Python | false | false | 1,533 | py | 157 | db_connector.py | 121 | 0.759295 | 0.757339 | 0 | 54 | 27.296296 | 88 |
gadia-aayush/sample | 16,947,940,977,928 | 5a24ca26af6e8452748737094600f2036b05be02 | aa3b7c6a81a323d2e17a1be7cb7ce90a20d6099a | /cproject/cproject/urls.py | f1fda37d07db54cc78b571cf40954f1233c170a5 | []
| no_license | https://github.com/gadia-aayush/sample | fdf00a4a890af6e4380b133cc64d7df89c1defff | 145b83206f9fb0972d19bef9229da0c1bf0aede0 | refs/heads/master | 2022-12-22T16:54:50.228277 | 2020-08-18T20:26:05 | 2020-08-18T20:26:05 | 288,516,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """cproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('myauth/', include('myauth.urls')),
path('search/', include('elsearch.urls')),
path('get-books/', include('get_books.urls')),
path('category/', include('get_books.urls')),
path('get_bookinfo', include('isbn_bookinfo.urls')),
path('cp_recommendation', include('cp_recommendation.urls')),
path('product/', include('get_productinfo.urls')),
path('pincode/', include('pincode.urls')),
path('donation_form', include('donation_form.urls')),
path('get_razorpayid', include('generate_razorpayid.urls')),
path('user_orders/', include('user_orders.urls')),
path('get_ordertrack_url', include('order_track.urls')),
path('edit_address', include('edit_address.urls')),
path('delete_address', include('delete_address.urls')),
path('address_form', include('add_address.urls')),
path('user_address', include('get_address.urls')),
path('order_details', include('order_details.urls')),
path('all_orders', include('all_orders.urls')),
path('cancel_order', include('cancel_order.urls')),
path('notify_me', include('book_notify.urls')),
]
| UTF-8 | Python | false | false | 1,890 | py | 82 | urls.py | 54 | 0.677778 | 0.673545 | 0 | 44 | 41.954545 | 77 |
jamiezeminzhang/Leetcode_Python | 7,799,660,640,176 | 1116f837dee72cb865044c120f6f95fbe56e8c75 | 633eda99e8a4dd00a5549b6a63f5880dabaa0957 | /ALL_SOLUTIONS/127_OO_Word_Ladder.py | 61a42b1447d9c2eedf3f0ff5feb81ab06ab7bc07 | []
| no_license | https://github.com/jamiezeminzhang/Leetcode_Python | 1256e9097a482ab4a80363ddef8828986bbf9bde | 56383c2b07448a9017a7a707afb66e08b403ee76 | refs/heads/master | 2020-04-05T14:08:30.336545 | 2016-09-06T01:07:34 | 2016-09-06T01:07:34 | 53,762,214 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 21:45:27 2016
127. Word Ladder My Submissions Question
Total Accepted: 66019 Total Submissions: 337427 Difficulty: Medium
Given two words (beginWord and endWord), and a dictionary's word list, find
the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time
Each intermediate word must exist in the word list
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
Correct one: BFS
http://chaoren.is-programmer.com/posts/43039.html
@author: zeminzhang
"""
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: Set[str]
:rtype: int
"""
wordList.add(endWord)
q = []
q.append((beginWord,1))
while q:
curr = q.pop(0)
currword = curr[0]; currlen = curr[1]
if currword == endWord: return currlen
for i in range(len(beginWord)):
part1, part2 = currword[:i], currword[i+1:]
for j in 'abcdefghijklmnopqrstuvwxyz':
if currword[i] != j:
nextword = part1+j+part2
if nextword in wordList:
q.append((nextword,currlen+1))
wordList.remove(nextword)
return 0
sol = Solution()
#beginWord = "hit"
#endWord = "cog"
#wordList = {"hot","dot","dog","lot","log"}
beginWord = "hot"
endWord = "dog"
wordList = {"hot","dot","dog"}
print sol.ladderLength(beginWord, endWord, wordList)
| UTF-8 | Python | false | false | 1,966 | py | 263 | 127_OO_Word_Ladder.py | 263 | 0.60529 | 0.582909 | 0 | 65 | 29.246154 | 84 |
bmelo/NavalWeb | 4,406,636,472,028 | f5025154bc5582451894119937e06ae197c09097 | 26e901214377ea839b8ae4e9bf77a93e9f4b0725 | /convert_pipenet.py | c65ec0ead62462bfd562d24f6363dce956f7981f | []
| no_license | https://github.com/bmelo/NavalWeb | 30f150debb3e962c6292f0ee6799a01c2c3c5a3c | c2bd79a72b9b6a185cff566920db94bb5b266458 | refs/heads/master | 2021-01-07T00:17:45.237974 | 2020-02-28T16:55:28 | 2020-02-28T16:55:28 | 241,523,795 | 0 | 0 | null | true | 2020-02-19T03:28:36 | 2020-02-19T03:28:35 | 2020-02-18T19:08:04 | 2020-02-18T19:08:02 | 15,681 | 0 | 0 | 0 | null | false | false | import json
nodes = [
['C558A0627C9446548B23254872701164','Z-5111502P',False],
['950CBDB068994C10963963131A751BE0','"24""-SW-B18H-002-"',True],
['25B5BA5E7AC644CD84276902327BA1E1','VALV-51115002',False],
['E374ECABE2014723B6F0ADEA1799FDB2','"24""-SW-B18H-002-"',True],
['A9FA7C4BC8D344628D783465E50257B8','"24""-SW-B18H-002-"',True],
['0EB9ED0ACCBE41BB9F7C370F617AC1BD','"24""-SW-B18H-002-"',True],
['B8C3707136DB4966A6E534E033F778B9','XV-5111524',False],
['E37DCB79E4C64F63B1C071BDAB87A408','"24""-SW-B18H-002-"',True],
['563C412FFA1D46D7AEF9F7844237B8BB','"24""-SW-B18H-214-"',True],
['D2D3BC7AE3804003BA9477122F7BF0A9','"24""-SW-B18H-214-"',True],
['AF238B33DA0341DE92CD067F8C37FC5B','"14""-SW-B18H-214-NI"',True],
['3EE2AA386F8A4DAC879D7C277D7D4511','"24""-SW-B18H-214-"',True],
['5F62BFB7075C42D8BB1B3F69B499950B','"24""-SW-B18H-214-"',True],
['7342DE6A53E44B92A347A0AEB63CBCF5','HV-5111522',False],
['EF49451ECDB448C0B57D27BB83FE3770','"24""-SW-B18H-214-"',True],
['BA6B12F71AF248C6A8B348428016513E','"24""-SW-B18H-214-"',True],
['152CF901785642D3B4D63C848387ADCC','HV-5111521',False],
['8E5A24D1DF5042F9871CE6CA08BE4E08','"24""-SW-B18H-214-"',True],
['51445719BBE2483FA8D2123C5875FA6E','"24""-SW-B18H-214-"',True],
['F93E75371BFC4E1ABAB40BAB51D7AB09','"24""-SW-B18H-214-"',True],
['B7534C996FA1415ABEEF017E1EF2CCC6','"24""-SW-B18H-214-"',True],
['A4B1B7995172457886655672603D53F2','"24""-SW-B18H-214-"',True],
['A54D5B633BFC4A429D19AC778D3F2371','"14""-SW-B14H-005-"',True],
['3B7B5F5CD4BE4A4D86BF814A9B006219','"14""-SW-B14H-005-"',True],
['ED84453A06814765A04087F85C8FC487','2858',False],
['9316319A4A064FB6AD5134AA4A8946A4','"24""-SW-B18H-214-"',True],
['438D83A3617149E99769717D0609552A','"24""-SW-B18H-214-"',True],
['F9B6779DBBEF4BFCBF00B0153DDA6737','"14""-SW-B14H-012-"',True],
['20BEB38A4EFC44BFB7B28A1286E68079','"14""-SW-B14H-012-"',True],
['CBC5067AA669438691F35A4D9F316F9A','"24""-SW-B18H-002-"',True],
['243FF371296D43A884A895695512F576','HV-5111518',False],
['3F8F323D51154B47895CD45E32E450E8','"24""-SW-B18H-214-"',True],
['E7DEC852EDE7499A85B00731DC6C81D2','"14""-SW-B14H-012-"',True],
['2FB7A19582A04DDD8A619AA2D47A103A','2862',False],
['B9DFA95A7F4D4AEDA956DC41FF4BBB7F','2862',False],
['6D477AC8D8A0468AB0C550741F6EA08A','"24""-SW-B18H-002-"',True],
['72A23EDDC7CE4CA0BDD772DAFD7E0721','"14""-SW-B14H-012-NI"',True],
['BB9000C0523A446C8D8EBF040001D9AE','"14""-SW-B14H-052-"',True],
['5E3A426B25DA4B27B535F6F875A04BDD','"14""-SW-B14H-052-"',True],
['4C4C0ADC37A049C98FB62A6486479B6B','"12""-SW-B14H-024-NI"',True],
['F1E19586B4EB45378E219222F23E81DE','"12""-SW-B14H-029-"',True],
['07512585EEC04D138DE172A265446EAB','"14""-SW-B18H-214-"',True],
['BA319CEB875042359D9CF4E97600F229','"14""-SW-B14H-052-"',True],
['3F2E00C45FE649EE8A71CA366285E8D5','"12""-SW-B14H-024-NI"',True],
['A0D1D209BAF4451D97A7638618323CB0','VALV-52415056',False],
['7105EC5097154D5FBB74C902FE0D5D80','"14""-SW-B14H-005-"',True],
['0FB3089039ED41EDA03910792A1B8295','"12""-SW-B14H-024-NI"',True],
['83162CF17B244AC28C013F6E18B7CFA1','"4""-SW-B18H-066-NI"',True],
['3C8F382CBAF3419DBDA398750DAE4BE9','PV-5241509',False],
['F44DD002B889453AABFEF91498951284','"24""-SW-B18H-214-"',True],
['A9823DCAB84C42AAB695B918988A4C9A','"4""-SW-B18H-066-NI"',True],
['0E579CC8A43B4041B5E5A423CD85C160','FT-5111501B',False],
['D844F8C3224A4DF38EBEAA9745ADB71B','"24""-SW-B18H-214-"',True],
['631B2AD3329F4AE6BA11CB2E9DC64772','VALV-52415024',False],
['7C0D6DBA5F4F4D7BB5AC44E6E1F3DF19','"24""-SW-B18H-214-"',True],
['3DDA4458A31B498CBCCB7E5AC18C6547','"12""-SW-B14H-024-NI"',True],
['5DE1C81C75C346CB93C7A801ECD9E507','"12""-SW-B14H-024-NI"',True],
['4F0C1930CB9F47DDB2ED9077D75D3A3A','"8""-SW-B14H-024-NI"',True],
['16EFA207185146CC88E262A6752CE04B','"12""-SW-B14H-024-NI"',True],
['53F3FFD225B34405BBB7B38C3FB3E69A','"10""-SW-B14H-065-NI"',True],
['6ED95448AA374A91961DC5436DCDF135','"4""-SW-B18H-066-NI"',True],
['663A89CCB54F4BE2B06F689ED00AAB8E','"24""-SW-B18H-214-"',True],
['3704867669F14CDDA29807FC40470AAD','"10""-SW-B14H-024-NI"',True],
['E46DE2C4256C40DFB236642B2F5EEE4D','"4""-SW-B18H-066-NI"',True],
['83D38D3BD9B1479698C567026326A13E','"4""-SW-B18H-066-NI"',True],
['BA41295AA3E444B7AF063D4AC1CEABCA','"12""-SW-B14H-029-"',True],
['C3B0074CAE714210BD2D341BBE9708DD','"24""-SW-B18H-002-"',True],
['C360C6A37CB84CA599B83A6A7053AB0A','"10""-SW-B14H-065-NI"',True],
['260932ACA0B8414B941E0C409671B8B7','"8""-SW-B14H-024-NI"',True],
['6F35FF9A2D2646C6866A91B8ACAF24CD','"4""-SW-B18H-066-NI"',True],
['183943A4A00649659B8A81AC8C23EABD','"14""-SW-B14H-028-"',True],
['CE35BFB23CFE4A7A906E587AEDF5FE4A','"10""-SW-B14H-024-NI"',True],
['B944808064D940B0987AA5D873B35CC1','"14""-SW-B14H-028-"',True],
['1382BE778C6B4F3B9F28B56FC340FB35','"12""-SW-B14H-029-"',True],
['72318B8E2F5A41A0B069E40E01923609','"4""-SW-B18H-066-NI"',True],
['18D89F8791EA4F2EA68BD481A283BB73','"24""-SW-B18H-214-"',True],
['6A2EDF8265E8421DBA33BB5E7580309E','B-5241502B',False],
['A62FC1BB82104D22A9E7CAAB5A0B5412','"4""-SW-B18H-066-NI"',True],
['7E235C3F8F704AC5AF5AF139FDC16E0A','"4""-SW-B18H-066-NI"',True],
['5F8D4187E4C04BAE944274D82E0CDC10','"12""-SW-B14H-029-"',True],
['9AEC07141E1E402294C417F8617551AB','"4""-SW-B18H-066-NI"',True],
['CBFB1B048330472DAB1CC97A2DA1C365','"4""-SW-B18H-066-NI"',True],
['12DEFD367DE64FC89A6B95B752E5AC12','"4""-SW-B18H-066-NI"',True],
['674920F505AD460DB6B02E89B1789491','"4""-SW-B14H-066-NI"',True],
['83614D8665FD4C919EBD168CD616E063','"12""-SW-B14H-024-NI"',True],
['86A00F581D284435BA1AE5EA9DDFB2E5','"12""-SW-B14H-024-NI"',True],
['7E4CDFE4292B4A6C98287BF7C126B689','GG-5241501A',False],
['ABE0388F75204DA082A420753D0E9B08','"12""-SW-B14H-024-"',True],
['C6030B1381D1470BB248F6DD606DBB90','"14""-SW-B14H-026-"',True],
['D23EA902EA884458ACCF8072D555CDE7','"24""-SW-B18H-214-"',True],
['BA88E57D668647579BF3016108E65EE5','"12""-SW-B14H-024-NI"',True],
['596952EFB1D34AA4BFCF64773AF50857','ESP-52415048',False],
['792D0312AE8E4A3DA16CA658179D38A2','"12""-SW-B14H-024-NI"',True],
['3E303AC1E30542B39391373EFC413239','"12""-SW-B14H-024-NI"',True],
['441B037684614EC581B4E02554E4E3CA','"24""-SW-B18H-214-"',True],
['BAF69E705DDF48AA877184E03C4BB52B','2872',False],
['3EF8F05BABDF40259EADC44131819AF0','2872',False],
['05B5F361E4CD4851AC3F2851660BA5D7','"14""-SW-B14H-028-"',True],
['E87EA9C0D1B346F98F4500A422FD4F87','"24""-SW-B18H-002-"',True],
['E2182343C50B4EFCBCF62A52C01469FA','"12""-SW-B14H-024-NI"',True],
['78B19E91F58E41CE8BF250D93F8CABF4','"12""-SW-B14H-024-"',True],
['E9BC66131EB24D9FBEA8881D28B25919','"12""-SW-B14H-029-"',True],
['39A7733B0D2047F88CB9A3E649B860A3','XV-5241549',False],
['537AEE5321A649DD8AEE2AA4F56EBA90','"12""-SW-B14H-029-"',True],
['B433C67E79B84FE0AE9A6C7B68C9BB3E','B-5241502A',False],
['3712D33E3E15451F9E2A0536EFB11BF5','"14""-SW-B14H-026-"',True],
['78EA39E83DDB4B419CEA0183172DD619','"14""-SW-B14H-026-"',True],
['288EFC7D9B254FD8B38C10660776FF9A','"14""-SW-B14H-026-"',True],
['3F0EDC302897422C88BA2269650C237B','HV-5111506',False],
['5F66565B3067499783317018B867B058','XV-5241547',False],
['F0DA3055BB1649A5832776B1042CEEFD','"14""-SW-B14H-026-"',True],
['096E69E3C6D44B4DB767D596AB8A2E09','2858',False],
['105705F450774C218EA05BBE90970FED','"14""-SW-B14H-052-"',True],
['308DB54064384D5F8769EAF223D56CE0','"14""-SW-B14H-005-"',True],
['B90913ACD9EB4181A2719EC49D7BE3DE','"14""-SW-B14H-052-"',True],
['B90643B8D5C043A681B0ACF230DF7CBC','"14""-SW-B14H-052-"',True],
['B8B7989D606A4B5BBFB28F6840986D17','"12""-SW-B14H-024-NI"',True],
['2595E28BB40447F1BDA5716D5CF2CF33','"12""-SW-B14H-024-"',True],
['2C2BB4952A044DCE96DBD2358BBC1911','"14""-SW-B14H-052-"',True],
['EB3F38B98ED9457BB0A1A697F65D14CD','"12""-SW-B14H-024-"',True],
['6A1CA6D29CD645058BF2E71637528693','XV-5241551',False],
['DA75F465071F4B0099024271E55BBB3C','"4""-SW-B18H-066-NI"',True],
['FF2A6FBDC9A8418BA7223752DD262C10','"12""-SW-B14H-024-"',True],
['47B23B0B0D2842EE81F10FBD51ACD595','"12""-SW-B14H-024-"',True],
['B27B64CAA71642DA90B540A0864BF58D','VALV-52415031',False],
['7C553E5F56F940089B25CC7F52A285CE','HV-5241524',False],
['C8AC0F7B15BB4296AAB0451CE54CE0C4','"12""-SW-B14H-024-NI"',True],
['A9A054CF690743A194A1370C128C92BF','"12""-SW-B14H-024-"',True],
['A9A8357A4A284F00964452E9BF83B9A2','"24""-SW-B18H-002-"',True],
['876D1314DD694C6E9E913D180DE8FEEB','"12""-SW-B14H-024-NI"',True],
['537D59BD3F004653B62DA5A6F78C5114','"14""-SW-B14H-028-"',True],
['EE01F70565174F28A16E3EE39DEFF941','"12""-SW-B14H-024-"',True],
['BC9B6CDF14A1452B843D9A1C6D28FB8A','"14""-SW-B14H-028-"',True],
['084CEBB7191E433FAC9D7E9F442FEAD5','XV-5241552',False],
['C3320AA78B5D4EAEB49F8E2B98A7A529','"12""-SW-B14H-024-NI"',True],
['715C7156BC9B4A4FAC8DE23C1C4BC329','"14""-SW-B14H-052-"',True]
]
edges = [
['C558A0627C9446548B23254872701164','950CBDB068994C10963963131A751BE0','Directed','Pipe',None],
['950CBDB068994C10963963131A751BE0','25B5BA5E7AC644CD84276902327BA1E1','Directed','Other','VA'],
['25B5BA5E7AC644CD84276902327BA1E1','E87EA9C0D1B346F98F4500A422FD4F87','Directed','Pipe',None],
['E374ECABE2014723B6F0ADEA1799FDB2','A9FA7C4BC8D344628D783465E50257B8','Directed','Pipe',None],
['A9FA7C4BC8D344628D783465E50257B8','0EB9ED0ACCBE41BB9F7C370F617AC1BD','Directed','Pipe',None],
['0EB9ED0ACCBE41BB9F7C370F617AC1BD','B8C3707136DB4966A6E534E033F778B9','Directed','Other','XV'],
['B8C3707136DB4966A6E534E033F778B9','6D477AC8D8A0468AB0C550741F6EA08A','Directed','Pipe',None],
['E37DCB79E4C64F63B1C071BDAB87A408','0E579CC8A43B4041B5E5A423CD85C160','Directed','Other','FT'],
['563C412FFA1D46D7AEF9F7844237B8BB','D23EA902EA884458ACCF8072D555CDE7','Directed','Pipe',None],
['D2D3BC7AE3804003BA9477122F7BF0A9','563C412FFA1D46D7AEF9F7844237B8BB','Directed','Pipe',None],
['D2D3BC7AE3804003BA9477122F7BF0A9','D23EA902EA884458ACCF8072D555CDE7','Directed','Pipe',None],
['AF238B33DA0341DE92CD067F8C37FC5B','F9B6779DBBEF4BFCBF00B0153DDA6737','Directed','Pipe',None],
['3EE2AA386F8A4DAC879D7C277D7D4511','5F62BFB7075C42D8BB1B3F69B499950B','Directed','Pipe',None],
['5F62BFB7075C42D8BB1B3F69B499950B','3F8F323D51154B47895CD45E32E450E8','Directed','Pipe',None],
['7342DE6A53E44B92A347A0AEB63CBCF5','BA6B12F71AF248C6A8B348428016513E','Directed','Pipe',None],
['EF49451ECDB448C0B57D27BB83FE3770','F44DD002B889453AABFEF91498951284','Directed','Pipe',None],
['BA6B12F71AF248C6A8B348428016513E','152CF901785642D3B4D63C848387ADCC','Directed','Other','HV'],
['152CF901785642D3B4D63C848387ADCC','441B037684614EC581B4E02554E4E3CA','Directed','Pipe',None],
['8E5A24D1DF5042F9871CE6CA08BE4E08','7342DE6A53E44B92A347A0AEB63CBCF5','Directed','Other','HV'],
['51445719BBE2483FA8D2123C5875FA6E','F93E75371BFC4E1ABAB40BAB51D7AB09','Directed','Pipe',None],
['F93E75371BFC4E1ABAB40BAB51D7AB09','7C0D6DBA5F4F4D7BB5AC44E6E1F3DF19','Directed','Pipe',None],
['B7534C996FA1415ABEEF017E1EF2CCC6','07512585EEC04D138DE172A265446EAB','Directed','Pipe',None],
['B7534C996FA1415ABEEF017E1EF2CCC6','A54D5B633BFC4A429D19AC778D3F2371','Directed','Pipe',None],
['A4B1B7995172457886655672603D53F2','B7534C996FA1415ABEEF017E1EF2CCC6','Directed','Pipe',None],
['A54D5B633BFC4A429D19AC778D3F2371','308DB54064384D5F8769EAF223D56CE0','Directed','Pipe',None],
['3B7B5F5CD4BE4A4D86BF814A9B006219','ED84453A06814765A04087F85C8FC487','Directed','Pipe',None],
['ED84453A06814765A04087F85C8FC487','096E69E3C6D44B4DB767D596AB8A2E09','Directed','Pipe',None],
['9316319A4A064FB6AD5134AA4A8946A4','AF238B33DA0341DE92CD067F8C37FC5B','Directed','Pipe',None],
['9316319A4A064FB6AD5134AA4A8946A4','438D83A3617149E99769717D0609552A','Directed','Pipe',None],
['9316319A4A064FB6AD5134AA4A8946A4','F9B6779DBBEF4BFCBF00B0153DDA6737','Directed','Pipe',None],
['438D83A3617149E99769717D0609552A','18D89F8791EA4F2EA68BD481A283BB73','Directed','Pipe',None],
['F9B6779DBBEF4BFCBF00B0153DDA6737','20BEB38A4EFC44BFB7B28A1286E68079','Directed','Pipe',None],
['20BEB38A4EFC44BFB7B28A1286E68079','243FF371296D43A884A895695512F576','Directed','Other','HV'],
['CBC5067AA669438691F35A4D9F316F9A','E374ECABE2014723B6F0ADEA1799FDB2','Directed','Pipe',None],
['243FF371296D43A884A895695512F576','E7DEC852EDE7499A85B00731DC6C81D2','Directed','Pipe',None],
['3F8F323D51154B47895CD45E32E450E8','663A89CCB54F4BE2B06F689ED00AAB8E','Directed','Pipe',None],
['E7DEC852EDE7499A85B00731DC6C81D2','2FB7A19582A04DDD8A619AA2D47A103A','Directed','Pipe',None],
['2FB7A19582A04DDD8A619AA2D47A103A','B9DFA95A7F4D4AEDA956DC41FF4BBB7F','Directed','Pipe',None],
['B9DFA95A7F4D4AEDA956DC41FF4BBB7F','72A23EDDC7CE4CA0BDD772DAFD7E0721','Directed','Pipe',None],
['6D477AC8D8A0468AB0C550741F6EA08A','A9A8357A4A284F00964452E9BF83B9A2','Directed','Pipe',None],
['72A23EDDC7CE4CA0BDD772DAFD7E0721','2C2BB4952A044DCE96DBD2358BBC1911','Directed','Pipe',None],
['BB9000C0523A446C8D8EBF040001D9AE','7C553E5F56F940089B25CC7F52A285CE','Directed','Other','HV'],
['BB9000C0523A446C8D8EBF040001D9AE','BA319CEB875042359D9CF4E97600F229','Directed','Pipe',None],
['5E3A426B25DA4B27B535F6F875A04BDD','715C7156BC9B4A4FAC8DE23C1C4BC329','Directed','Pipe',None],
['4C4C0ADC37A049C98FB62A6486479B6B','C8AC0F7B15BB4296AAB0451CE54CE0C4','Directed','Pipe',None],
['F1E19586B4EB45378E219222F23E81DE','5F8D4187E4C04BAE944274D82E0CDC10','Directed','Pipe',None],
['07512585EEC04D138DE172A265446EAB','A54D5B633BFC4A429D19AC778D3F2371','Directed','Pipe',None],
['BA319CEB875042359D9CF4E97600F229','BB9000C0523A446C8D8EBF040001D9AE','Directed','Pipe',None],
['BA319CEB875042359D9CF4E97600F229','5E3A426B25DA4B27B535F6F875A04BDD','Directed','Pipe',None],
['3F2E00C45FE649EE8A71CA366285E8D5','C360C6A37CB84CA599B83A6A7053AB0A','Directed','Pipe',None],
['3F2E00C45FE649EE8A71CA366285E8D5','CE35BFB23CFE4A7A906E587AEDF5FE4A','Directed','Pipe',None],
['3F2E00C45FE649EE8A71CA366285E8D5','53F3FFD225B34405BBB7B38C3FB3E69A','Directed','Pipe',None],
['3F2E00C45FE649EE8A71CA366285E8D5','E2182343C50B4EFCBCF62A52C01469FA','Directed','Pipe',None],
['A0D1D209BAF4451D97A7638618323CB0','0FB3089039ED41EDA03910792A1B8295','Directed','Pipe',None],
['7105EC5097154D5FBB74C902FE0D5D80','BA319CEB875042359D9CF4E97600F229','Directed','Pipe',None],
['0FB3089039ED41EDA03910792A1B8295','260932ACA0B8414B941E0C409671B8B7','Directed','Pipe',None],
['83162CF17B244AC28C013F6E18B7CFA1','7E4CDFE4292B4A6C98287BF7C126B689','Directed','Other','GG'],
['3C8F382CBAF3419DBDA398750DAE4BE9','4F0C1930CB9F47DDB2ED9077D75D3A3A','Directed','Pipe',None],
['F44DD002B889453AABFEF91498951284','9316319A4A064FB6AD5134AA4A8946A4','Directed','Pipe',None],
['A9823DCAB84C42AAB695B918988A4C9A','A62FC1BB82104D22A9E7CAAB5A0B5412','Directed','Pipe',None],
['A9823DCAB84C42AAB695B918988A4C9A','83D38D3BD9B1479698C567026326A13E','Directed','Pipe',None],
['0E579CC8A43B4041B5E5A423CD85C160','D2D3BC7AE3804003BA9477122F7BF0A9','Directed','Pipe',None],
['D844F8C3224A4DF38EBEAA9745ADB71B','EF49451ECDB448C0B57D27BB83FE3770','Directed','Pipe',None],
['631B2AD3329F4AE6BA11CB2E9DC64772','1382BE778C6B4F3B9F28B56FC340FB35','Directed','Pipe',None],
['7C0D6DBA5F4F4D7BB5AC44E6E1F3DF19','A4B1B7995172457886655672603D53F2','Directed','Pipe',None],
['3DDA4458A31B498CBCCB7E5AC18C6547','83614D8665FD4C919EBD168CD616E063','Directed','Pipe',None],
['5DE1C81C75C346CB93C7A801ECD9E507','86A00F581D284435BA1AE5EA9DDFB2E5','Directed','Pipe',None],
['4F0C1930CB9F47DDB2ED9077D75D3A3A','3DDA4458A31B498CBCCB7E5AC18C6547','Directed','Pipe',None],
['16EFA207185146CC88E262A6752CE04B','4C4C0ADC37A049C98FB62A6486479B6B','Directed','Pipe',None],
['53F3FFD225B34405BBB7B38C3FB3E69A','7E4CDFE4292B4A6C98287BF7C126B689','Directed','Other','GG'],
['6ED95448AA374A91961DC5436DCDF135','72318B8E2F5A41A0B069E40E01923609','Directed','Pipe',None],
['6ED95448AA374A91961DC5436DCDF135','9AEC07141E1E402294C417F8617551AB','Directed','Pipe',None],
['6ED95448AA374A91961DC5436DCDF135','CBFB1B048330472DAB1CC97A2DA1C365','Directed','Pipe',None],
['663A89CCB54F4BE2B06F689ED00AAB8E','8E5A24D1DF5042F9871CE6CA08BE4E08','Directed','Pipe',None],
['3704867669F14CDDA29807FC40470AAD','7E4CDFE4292B4A6C98287BF7C126B689','Directed','Other','GG'],
['E46DE2C4256C40DFB236642B2F5EEE4D','A9823DCAB84C42AAB695B918988A4C9A','Directed','Pipe',None],
['83D38D3BD9B1479698C567026326A13E','7E4CDFE4292B4A6C98287BF7C126B689','Directed','Other','GG'],
['BA41295AA3E444B7AF063D4AC1CEABCA','631B2AD3329F4AE6BA11CB2E9DC64772','Directed','Other','VA'],
['C3B0074CAE714210BD2D341BBE9708DD','E37DCB79E4C64F63B1C071BDAB87A408','Directed','Pipe',None],
['C360C6A37CB84CA599B83A6A7053AB0A','53F3FFD225B34405BBB7B38C3FB3E69A','Directed','Pipe',None],
['260932ACA0B8414B941E0C409671B8B7','3C8F382CBAF3419DBDA398750DAE4BE9','Directed','Other','PV'],
['6F35FF9A2D2646C6866A91B8ACAF24CD','12DEFD367DE64FC89A6B95B752E5AC12','Directed','Pipe',None],
['183943A4A00649659B8A81AC8C23EABD','6A2EDF8265E8421DBA33BB5E7580309E','Directed','Other','B-'],
['CE35BFB23CFE4A7A906E587AEDF5FE4A','3704867669F14CDDA29807FC40470AAD','Directed','Pipe',None],
['B944808064D940B0987AA5D873B35CC1','05B5F361E4CD4851AC3F2851660BA5D7','Directed','Pipe',None],
['1382BE778C6B4F3B9F28B56FC340FB35','39A7733B0D2047F88CB9A3E649B860A3','Directed','Other','XV'],
['72318B8E2F5A41A0B069E40E01923609','7E4CDFE4292B4A6C98287BF7C126B689','Directed','Other','GG'],
['18D89F8791EA4F2EA68BD481A283BB73','3EE2AA386F8A4DAC879D7C277D7D4511','Directed','Pipe',None],
['6A2EDF8265E8421DBA33BB5E7580309E','EE01F70565174F28A16E3EE39DEFF941','Directed','Pipe',None],
['A62FC1BB82104D22A9E7CAAB5A0B5412','83D38D3BD9B1479698C567026326A13E','Directed','Pipe',None],
['7E235C3F8F704AC5AF5AF139FDC16E0A','83162CF17B244AC28C013F6E18B7CFA1','Directed','Pipe',None],
['5F8D4187E4C04BAE944274D82E0CDC10','537AEE5321A649DD8AEE2AA4F56EBA90','Directed','Pipe',None],
['9AEC07141E1E402294C417F8617551AB','DA75F465071F4B0099024271E55BBB3C','Directed','Pipe',None],
['CBFB1B048330472DAB1CC97A2DA1C365','72318B8E2F5A41A0B069E40E01923609','Directed','Pipe',None],
['12DEFD367DE64FC89A6B95B752E5AC12','6ED95448AA374A91961DC5436DCDF135','Directed','Pipe',None],
['674920F505AD460DB6B02E89B1789491','6F35FF9A2D2646C6866A91B8ACAF24CD','Directed','Pipe',None],
['83614D8665FD4C919EBD168CD616E063','5DE1C81C75C346CB93C7A801ECD9E507','Directed','Pipe',None],
['86A00F581D284435BA1AE5EA9DDFB2E5','876D1314DD694C6E9E913D180DE8FEEB','Directed','Pipe',None],
['86A00F581D284435BA1AE5EA9DDFB2E5','674920F505AD460DB6B02E89B1789491','Directed','Pipe',None],
['86A00F581D284435BA1AE5EA9DDFB2E5','6F35FF9A2D2646C6866A91B8ACAF24CD','Directed','Pipe',None],
['ABE0388F75204DA082A420753D0E9B08','FF2A6FBDC9A8418BA7223752DD262C10','Directed','Pipe',None],
['C6030B1381D1470BB248F6DD606DBB90','B433C67E79B84FE0AE9A6C7B68C9BB3E','Directed','Other','B-'],
['D23EA902EA884458ACCF8072D555CDE7','D844F8C3224A4DF38EBEAA9745ADB71B','Directed','Pipe',None],
['BA88E57D668647579BF3016108E65EE5','16EFA207185146CC88E262A6752CE04B','Directed','Pipe',None],
['596952EFB1D34AA4BFCF64773AF50857','C3320AA78B5D4EAEB49F8E2B98A7A529','Directed','Pipe',None],
['792D0312AE8E4A3DA16CA658179D38A2','B8B7989D606A4B5BBFB28F6840986D17','Directed','Pipe',None],
['3E303AC1E30542B39391373EFC413239','792D0312AE8E4A3DA16CA658179D38A2','Directed','Pipe',None],
['441B037684614EC581B4E02554E4E3CA','51445719BBE2483FA8D2123C5875FA6E','Directed','Pipe',None],
['BAF69E705DDF48AA877184E03C4BB52B','3E303AC1E30542B39391373EFC413239','Directed','Pipe',None],
['3EF8F05BABDF40259EADC44131819AF0','BAF69E705DDF48AA877184E03C4BB52B','Directed','Pipe',None],
['05B5F361E4CD4851AC3F2851660BA5D7','183943A4A00649659B8A81AC8C23EABD','Directed','Pipe',None],
['05B5F361E4CD4851AC3F2851660BA5D7','BC9B6CDF14A1452B843D9A1C6D28FB8A','Directed','Pipe',None],
['E87EA9C0D1B346F98F4500A422FD4F87','CBC5067AA669438691F35A4D9F316F9A','Directed','Pipe',None],
['E2182343C50B4EFCBCF62A52C01469FA','CE35BFB23CFE4A7A906E587AEDF5FE4A','Directed','Pipe',None],
['78B19E91F58E41CE8BF250D93F8CABF4','EB3F38B98ED9457BB0A1A697F65D14CD','Directed','Pipe',None],
['E9BC66131EB24D9FBEA8881D28B25919','78B19E91F58E41CE8BF250D93F8CABF4','Directed','Pipe',None],
['39A7733B0D2047F88CB9A3E649B860A3','E9BC66131EB24D9FBEA8881D28B25919','Directed','Pipe',None],
['537AEE5321A649DD8AEE2AA4F56EBA90','BA41295AA3E444B7AF063D4AC1CEABCA','Directed','Pipe',None],
['B433C67E79B84FE0AE9A6C7B68C9BB3E','F1E19586B4EB45378E219222F23E81DE','Directed','Pipe',None],
['3712D33E3E15451F9E2A0536EFB11BF5','78EA39E83DDB4B419CEA0183172DD619','Directed','Pipe',None],
['3712D33E3E15451F9E2A0536EFB11BF5','C6030B1381D1470BB248F6DD606DBB90','Directed','Pipe',None],
['78EA39E83DDB4B419CEA0183172DD619','C6030B1381D1470BB248F6DD606DBB90','Directed','Pipe',None],
['288EFC7D9B254FD8B38C10660776FF9A','3712D33E3E15451F9E2A0536EFB11BF5','Directed','Pipe',None],
['3F0EDC302897422C88BA2269650C237B','3B7B5F5CD4BE4A4D86BF814A9B006219','Directed','Pipe',None],
['5F66565B3067499783317018B867B058','288EFC7D9B254FD8B38C10660776FF9A','Directed','Pipe',None],
['F0DA3055BB1649A5832776B1042CEEFD','5F66565B3067499783317018B867B058','Directed','Other','XV'],
['096E69E3C6D44B4DB767D596AB8A2E09','7105EC5097154D5FBB74C902FE0D5D80','Directed','Pipe',None],
['105705F450774C218EA05BBE90970FED','F0DA3055BB1649A5832776B1042CEEFD','Directed','Pipe',None],
['308DB54064384D5F8769EAF223D56CE0','3F0EDC302897422C88BA2269650C237B','Directed','Other','HV'],
['B90913ACD9EB4181A2719EC49D7BE3DE','105705F450774C218EA05BBE90970FED','Directed','Pipe',None],
['B90643B8D5C043A681B0ACF230DF7CBC','2C2BB4952A044DCE96DBD2358BBC1911','Directed','Pipe',None],
['B90643B8D5C043A681B0ACF230DF7CBC','7C553E5F56F940089B25CC7F52A285CE','Directed','Other','HV'],
['B8B7989D606A4B5BBFB28F6840986D17','596952EFB1D34AA4BFCF64773AF50857','Directed','Other','ES'],
['2595E28BB40447F1BDA5716D5CF2CF33','78B19E91F58E41CE8BF250D93F8CABF4','Directed','Pipe',None],
['2C2BB4952A044DCE96DBD2358BBC1911','B90643B8D5C043A681B0ACF230DF7CBC','Directed','Pipe',None],
['2C2BB4952A044DCE96DBD2358BBC1911','B90913ACD9EB4181A2719EC49D7BE3DE','Directed','Pipe',None],
['EB3F38B98ED9457BB0A1A697F65D14CD','3EF8F05BABDF40259EADC44131819AF0','Directed','Pipe',None],
['6A1CA6D29CD645058BF2E71637528693','2595E28BB40447F1BDA5716D5CF2CF33','Directed','Pipe',None],
['DA75F465071F4B0099024271E55BBB3C','7E235C3F8F704AC5AF5AF139FDC16E0A','Directed','Pipe',None],
['DA75F465071F4B0099024271E55BBB3C','83162CF17B244AC28C013F6E18B7CFA1','Directed','Pipe',None],
['DA75F465071F4B0099024271E55BBB3C','E46DE2C4256C40DFB236642B2F5EEE4D','Directed','Pipe',None],
['FF2A6FBDC9A8418BA7223752DD262C10','B27B64CAA71642DA90B540A0864BF58D','Directed','Other','VA'],
['47B23B0B0D2842EE81F10FBD51ACD595','6A1CA6D29CD645058BF2E71637528693','Directed','Other','XV'],
['B27B64CAA71642DA90B540A0864BF58D','47B23B0B0D2842EE81F10FBD51ACD595','Directed','Pipe',None],
['7C553E5F56F940089B25CC7F52A285CE','B90643B8D5C043A681B0ACF230DF7CBC','Directed','Pipe',None],
['7C553E5F56F940089B25CC7F52A285CE','BB9000C0523A446C8D8EBF040001D9AE','Directed','Pipe',None],
['C8AC0F7B15BB4296AAB0451CE54CE0C4','A0D1D209BAF4451D97A7638618323CB0','Directed','Other','VA'],
['A9A054CF690743A194A1370C128C92BF','ABE0388F75204DA082A420753D0E9B08','Directed','Pipe',None],
['A9A8357A4A284F00964452E9BF83B9A2','C3B0074CAE714210BD2D341BBE9708DD','Directed','Pipe',None],
['A9A8357A4A284F00964452E9BF83B9A2','E37DCB79E4C64F63B1C071BDAB87A408','Directed','Pipe',None],
['876D1314DD694C6E9E913D180DE8FEEB','3F2E00C45FE649EE8A71CA366285E8D5','Directed','Pipe',None],
['537D59BD3F004653B62DA5A6F78C5114','084CEBB7191E433FAC9D7E9F442FEAD5','Directed','Other','XV'],
['EE01F70565174F28A16E3EE39DEFF941','A9A054CF690743A194A1370C128C92BF','Directed','Pipe',None],
['BC9B6CDF14A1452B843D9A1C6D28FB8A','183943A4A00649659B8A81AC8C23EABD','Directed','Pipe',None],
['084CEBB7191E433FAC9D7E9F442FEAD5','B944808064D940B0987AA5D873B35CC1','Directed','Pipe',None],
['C3320AA78B5D4EAEB49F8E2B98A7A529','BA88E57D668647579BF3016108E65EE5','Directed','Pipe',None],
['715C7156BC9B4A4FAC8DE23C1C4BC329','537D59BD3F004653B62DA5A6F78C5114','Directed','Pipe',None]
]
# Assemble the graph in a JSON-friendly shape: nodes keyed by a 1-based
# index, edges as a flat list of dictionaries built from the raw rows.
json_data = {
    'nodes': {
        index + 1: {'tag': node[0], 'label': node[1], 'is_pipe': node[2]}
        for index, node in enumerate(nodes)
    },
    'edges': [
        {
            'source': edge[0],
            'target': edge[1],
            'type': edge[2],
            'is_pipe': edge[3],
            'other_type': edge[4],
        }
        for edge in edges
    ],
}
# Persist the structure as pretty-printed JSON.
with open('sample.json', 'w') as outfile:
    outfile.write(json.dumps(json_data, indent=4))
def get_label(label):
    """Return the display label for a node key (currently the key itself)."""
    return label
def label_node(tag):
    """Return the label of the node whose 'tag' field equals *tag*.

    Returns None when no node matches (made explicit here; the previous
    version fell off the end of the loop with the same implicit result).
    """
    # Iterate items() once instead of re-looking each key up per comparison.
    for label, node in json_data['nodes'].items():
        if node['tag'] == tag:
            return get_label(label)
    return None
# Build the XML fragments spliced into the SDF template below.
title = "Teste Ladeira."
description = "Grafo gerado saida do programa do Ladeira."
indent = " " * 12
sdf_nodes = "\n" + "".join(
    indent + "<Node label=\"{}\"></Node>\n".format(get_label(label))
    for label in json_data['nodes']
)
sdf_edges = "\n" + "".join(
    indent + "<Pipe input=\"{}\" output=\"{}\"></Pipe>\n".format(
        label_node(edge['source']), label_node(edge['target'])
    )
    for edge in json_data['edges']
)
# Write the network out in SDF/XML form, filling the template with the
# title, description and the node/edge fragments built above.
with open('sample.sdf', 'w') as outfile:
    outfile.write("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Project SYSTEM "standard.dtd">
<Project version="1.9 (0)">
    <Network-standard>
        <Title>{}</Title>
        <Network-description>{}</Network-description>
        <Nodes>{} </Nodes>
        <Links>{} </Links>
    </Network-standard>
</Project>
""".format(title, description, sdf_nodes, sdf_edges)
)
| UTF-8 | Python | false | false | 26,693 | py | 1 | convert_pipenet.py | 1 | 0.745926 | 0.372682 | 0 | 365 | 72.128767 | 112 |
bana-handaga/bana-handaga.github.io | 4,243,427,723,310 | 1422f4b2cae48b6b987350253f9b2fcb1c0a8fba | d0b93f9cbe6522850aa64cd0a9258b8ef918e5e7 | /modul-praktikum-iot/modul04/mqtt_led.py | 9a2fb39b7d67bf42cd851fa4236a734d0739125b | [
"MIT"
]
| permissive | https://github.com/bana-handaga/bana-handaga.github.io | 5a46dac41bbd295f50e1ca3e50686353be7c5af6 | 883bb40bb904b266fcfe1562ab8f59601b38b335 | refs/heads/master | 2023-02-20T08:01:26.170506 | 2023-01-06T14:48:47 | 2023-01-06T14:48:47 | 121,956,123 | 3 | 12 | MIT | false | 2023-02-16T01:27:53 | 2018-02-18T13:50:49 | 2021-10-26T13:07:09 | 2023-02-16T01:27:52 | 254,184 | 3 | 11 | 10 | HTML | false | false | """
File: chapter04/mqtt_led.py
A full life-cycle Python + MQTT program to control an LED.
Dependencies:
pip3 install paho-mqtt gpiozero pigpio
Built and tested with Python 3.7 on Raspberry Pi 4 Model B
"""
import logging
import signal
import sys
import json
from time import sleep
from gpiozero import Device, PWMLED
from gpiozero.pins.pigpio import PiGPIOFactory
import paho.mqtt.client as mqtt # (1)
# Initialize Logging
logging.basicConfig(level=logging.WARNING) # Global logging configuration
logger = logging.getLogger("main") # Logger for this module
logger.setLevel(logging.INFO) # Debugging for this file.
# Initialize GPIO
Device.pin_factory = PiGPIOFactory() # Set GPIOZero to use PiGPIO by default.
# Global Variables
LED_GPIO_PIN = 21
BROKER_HOST = "localhost" # (2)
BROKER_PORT = 1883
CLIENT_ID = "LEDClient" # (3)
TOPIC = "led" # (4)
client = None # MQTT client instance. See init_mqtt() # (5)
led = None # PWMLED Instance. See init_led()
"""
GPIO Related Functions
"""
def init_led():
"""Create and initialise an LED Object"""
global led
led = PWMLED(LED_GPIO_PIN)
led.off()
def set_led_level(data): # (6)
"""Set LED State to one of On, Blink or Off (Default)
'data' expected to be a dictionary with the following format:
{
"level": a number between 0 and 100,
}
"""
level = None # a number 0..100
if "level" in data:
level = data["level"]
if isinstance(level, int) or isinstance(level, float) or level.isdigit():
# State is a number
level = max(0, min(100, int(level))) # Bound state to range 0..100
led.value = level / 100 # Scale 0..100% back to 0..1
logger.info("LED at brightness {}%".format(level))
else:
logger.info("Request for unknown LED level of '{}'. We'll turn it Off instead.".format(level))
led.value = 0 # 0% = Led off.
else:
logger.info("Message '{}' did not contain property 'level'.".format(data))
"""
MQTT Related Functions and Callbacks
"""
def on_connect(client, user_data, flags, connection_result_code): # (7)
"""on_connect is called when our program connects to the MQTT Broker.
Always subscribe to topics in an on_connect() callback.
This way if a connection is lost, the automatic
re-connection will also results in the re-subscription occurring."""
if connection_result_code == 0: # (8)
# 0 = successful connection
logger.info("Connected to MQTT Broker")
else:
# connack_string() gives us a user friendly string for a connection code.
logger.error("Failed to connect to MQTT Broker: " + mqtt.connack_string(connection_result_code))
# Subscribe to the topic for LED level changes.
client.subscribe(TOPIC, qos=2) # (9)
def on_disconnect(client, user_data, disconnection_result_code): # (10)
"""Called disconnects from MQTT Broker."""
logger.error("Disconnected from MQTT Broker")
def on_message(client, userdata, msg): # (11)
"""Callback called when a message is received on a subscribed topic."""
logger.debug("Received message for topic {}: {}".format( msg.topic, msg.payload))
data = None
try:
data = json.loads(msg.payload.decode("UTF-8")) # (12)
except json.JSONDecodeError as e:
logger.error("JSON Decode Error: " + msg.payload.decode("UTF-8"))
if msg.topic == TOPIC: # (13)
set_led_level(data) # (14)
else:
logger.error("Unhandled message topic {} with payload " + str(msg.topic, msg.payload))
def signal_handler(sig, frame):
"""Capture Control+C and disconnect from Broker."""
global led_state
logger.info("You pressed Control + C. Shutting down, please wait...")
client.disconnect() # Graceful disconnection.
led.off()
sys.exit(0)
def init_mqtt():
global client
# Our MQTT Client. See PAHO documentation for all configurable options.
# "clean_session=True" means we don"t want Broker to retain QoS 1 and 2 messages
# for us when we"re offline. You"ll see the "{"session present": 0}" logged when
# connected.
client = mqtt.Client( # (15)
client_id=CLIENT_ID,
clean_session=False)
# Route Paho logging to Python logging.
client.enable_logger() # (16)
# Setup callbacks
client.on_connect = on_connect # (17)
client.on_disconnect = on_disconnect
client.on_message = on_message
# Connect to Broker.
client.connect(BROKER_HOST, BROKER_PORT) # (18)
# Initialise Module
init_led()
init_mqtt()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler) # Capture Control + C # (19)
logger.info("Listening for messages on topic '" + TOPIC + "'. Press Control + C to exit.")
client.loop_start() # (20)
signal.pause()
| UTF-8 | Python | false | false | 6,169 | py | 195 | mqtt_led.py | 69 | 0.514184 | 0.501378 | 0 | 169 | 34.502959 | 106 |
rasimon22/QueueHub-Backend | 8,065,948,630,760 | e0cc0a33e1baba441e47f0c179063124f3be498c | 7e994237d1d21eeb2e27a382e52cb0619c4aa1d1 | /qh/lib/python3.6/types.py | b5c70b21ba42da95217f008de0fe41f396778d05 | []
| no_license | https://github.com/rasimon22/QueueHub-Backend | cac275d6bb317ff50f46f1b1bff075dda76e6019 | c72b340270212d63c8b7c5b16f62225dc7709d02 | refs/heads/master | 2020-05-04T06:23:34.419539 | 2019-04-25T20:20:27 | 2019-04-25T20:20:27 | 179,004,695 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /Users/rasimon/anaconda3/lib/python3.6/types.py | UTF-8 | Python | false | false | 47 | py | 54 | types.py | 50 | 0.829787 | 0.765957 | 0 | 1 | 47 | 47 |
y0zhyk/hedge | 541,165,899,468 | e3cb8516110f3173cd993f0e7da24418b178583a | 5129f74d212816ce9e61827b6ca4496290683551 | /hedge/hedgehog/footer.py | 753216681d87b064726028d9e374848a06c7b11f | []
| no_license | https://github.com/y0zhyk/hedge | 4801f25ae2c07e00ef13442a02a94ef1edcc7934 | 951d9917b7cf591bd9aa41044f221c0694260a20 | refs/heads/master | 2021-01-01T18:27:51.507440 | 2015-08-15T08:40:08 | 2015-08-15T08:40:08 | 11,758,203 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class LogoItem:
def __init__(self, image, url):
self.__image = image
self.__url = url
@property
def image(self):
return "images/{}".format(self.__image)
@property
def url(self):
return "http://{}".format(self.__url)
class Logos(list):
"""Represents site footer"""
def __init__(self, logos):
super(Logos, self).__init__()
self.extend(logos)
logos = Logos(
[
LogoItem(image="debian_logo.png", url="www.raspbian.org"),
LogoItem(image="raspberrypi_logo.png", url="www.raspberrypi.org"),
LogoItem(image="python_logo.png", url="www.python.org")
]
)
| UTF-8 | Python | false | false | 658 | py | 21 | footer.py | 11 | 0.56535 | 0.56535 | 0 | 29 | 21.689655 | 74 |
MilkSilk/JO_UEP | 16,724,602,655,249 | b82740ec79a64055268021480b6fb2d9a668886f | 017d1e580ead1bc277a97c17ff5d1149351250a4 | /tests/integration/test_hello.py | 3d4b41b1387e0ba4f2b626bf6408996df3d396a1 | []
| no_license | https://github.com/MilkSilk/JO_UEP | b9d1f3a1829b9c76e169fd98cdec99d19fd1416f | 3be78ef923eedd4de5566e8b97d47bd230020e32 | refs/heads/master | 2023-05-11T08:38:38.740919 | 2020-05-12T12:48:38 | 2020-05-12T12:48:38 | 263,317,161 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from hello import app
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
app.testing = True
self.client = app.test_client()
def test_hello(self):
response = self.client.get('/')
self.assertEqual(200, response.status_code)
self.assertEqual(b"Hello world!", response.data)
| UTF-8 | Python | false | false | 351 | py | 6 | test_hello.py | 5 | 0.646724 | 0.638177 | 0 | 15 | 22.4 | 56 |
reporkey/malaria-model | 4,664,334,531,502 | 6ef4886cc795abfdde1f41948924922bede144de | 944a78daf2bd131163b00a5ae8bbed7628ec4a19 | /recorder.py | 663c8479a014c52e40ec4c6fec0d2d0e951f0ee1 | []
| no_license | https://github.com/reporkey/malaria-model | d08e1159b703dfc14ce698026ef30722eac54437 | 9e51ec3d08f50a69b2d1c270c4b18b01c504c8de | refs/heads/master | 2020-07-13T19:53:50.329437 | 2020-07-10T09:53:40 | 2020-07-10T09:53:40 | 205,142,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
class Recorder:
def __init__(self, p):
self.i = []
self.symp = []
self.r = []
self.im = []
self.G = []
self.time = -1
self.parameters = p
#[p.N, p.S, p.i, p.r, p.beta_M_H, p.beta_H_M, p.day_I_R, p.day_R_S,
# p.bite_per_day, p.life_expectancy,
# (p.gPara.n, p.gPara.k, p.gPara.o, p.gPara.gmax)]
def append(self, i: int, symp: int, r: int, im, G=0):
self.i.append(i)
self.symp.append(symp)
self.r.append(r)
self.im.append(im)
self.G.append(G)
def collectData(self):
data = {}
data["parameter"] = self.parameters.toJson()
data["time"] = self.time
data["i"] = self.i
data["symp"] = self.symp
data["r"] = self.r
data["im"] = self.im
data["G"] = self.G
return data
def ifTerminate(self):
if self.time > 0:
if self.i[-1] == 0 or self.i[-1] == self.parameters.N:
return True
if self.time > 900:
return True
if self.time > 300:
if max(self.i[-300:]) - min(self.i[-300:]) < 0.03 * self.parameters.N:
return True
return False | UTF-8 | Python | false | false | 1,101 | py | 287 | recorder.py | 9 | 0.53406 | 0.514986 | 0 | 45 | 22.511111 | 76 |
gabrielponto/FipeRobot | 3,272,765,081,528 | 518ed4d16627d02a6b14a7494dc73b3274a168ed | 9d37c53b98784bdca6a21dd58f1cbf4e1c6fefcd | /run.py | 58ac88832ed346105234ac26cf9be32d1781d247 | []
| no_license | https://github.com/gabrielponto/FipeRobot | 4a2ca2ee83c2ea7a9e14068add2c0c4a4bbc876d | fca84dccf05a2d6415dd21d75a03315c24f5445f | refs/heads/master | 2020-06-03T04:51:02.792993 | 2016-06-19T19:19:45 | 2016-06-19T19:19:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding=utf-8
import sys, getopt
from libs.db import DB
def runParser(arg):
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals, Spider
from scrapy.utils.project import get_project_settings
from libs.parser.tabelareferencia import TabelaReferenciaSpider
from libs.parser.marca import MarcaSpider
from libs.parser.modelo import ModeloSpider
from libs.parser.anomodelo import AnoModeloSpider
from libs.parser.versao import VersaoSpider
db = DB()
spider = None
if arg == "tabelareferencia":
spider = TabelaReferenciaSpider(db)
if arg == "marca":
spider = MarcaSpider(db)
if arg == "modelo":
spider = ModeloSpider(db)
if arg == "anomodelo":
spider = AnoModeloSpider(db)
if arg == "versao":
spider = VersaoSpider(db)
if spider is None:
print 'Parser não encontrado ->' + arg
sys.exit(2)
crawler = Crawler(get_project_settings())
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
#log.start()
reactor.run()
def getAll(arg):
from models.tabelareferencia import TabelaReferenciaModel
from models.marca import MarcaModel
from models.modelo import ModeloModel
from models.anomodelo import AnoModeloModel
from models.versao import VersaoModel
db = DB()
model = None
if arg == "tabelareferencia":
model = TabelaReferenciaModel(db)
if arg == "marca":
model = MarcaModel(db)
if arg == "modelo":
model = ModeloModel(db)
if arg == "anomodelo":
model = AnoModeloModel(db)
if arg == "versao":
model = VersaoModel(db)
if model is None:
print 'Model não encontrado'
sys.exit(2)
ret = model.listAll(0,10)
if ret['err'] is True:
print model.getError()
else:
print ret
def main(argv):
    # Parse CLI options: -p/--parser runs a scrapy spider for the given model,
    # -l/--list prints the first rows of the given model's table.
    try:
        opts, args = getopt.getopt(argv,"hp:l:",["parser=","list="])
    except getopt.GetoptError:
        # Unknown/malformed option: show usage and exit with an error code.
        print '[FILE].py -p <model> -list<model>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print '[FILE].py -p <model> -list<model>'
            sys.exit()
        elif opt in ("-p", "--parser"):
            runParser(arg)
        elif opt in ("-l", "--list"):
            getAll(arg)
# Entry point: pass everything after the script name to the option parser.
if __name__ == '__main__':
main(sys.argv[1:]) | UTF-8 | Python | false | false | 2,209 | py | 18 | run.py | 16 | 0.700498 | 0.696874 | 0 | 97 | 21.762887 | 68 |
jmshin111/alogrithm-test | 6,691,559,065,430 | 836d30eda3d62cc7075c9f2d8d2d4aec9ba14295 | f56201a6e1a7b2f881b0afde452d04de296abb2c | /programmers_level2 test_20201003.py | c4ef5649a5cba88f9a7e0a502aa5f02b821af92a | []
| no_license | https://github.com/jmshin111/alogrithm-test | 7556fe16bf42ba415418c40c3cf69976bf57f342 | 15f73238453ef046752049c2361bf0a254e300d7 | refs/heads/master | 2023-06-27T23:48:44.928916 | 2021-07-11T13:14:45 | 2021-07-11T13:14:45 | 294,851,166 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 1
# num = [3, 6, 30, 34, 5, 9, 7, 95]
# num = [3, 30, 34, 5, 9]
# num = [6,7,9,88,99,34,765,342,123,983]
#num = [3, 30, 34, 5, 191]
# num = [12,121]
# num = [987,9,9,9]
#num = [999,998,997,99,98,97,9,7,8,6,999,998,997,99,98,97,999,998,997,99,98,97,9,7,8,6,5,4,3,2,1,1,1,1,1,1]
# num = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
num = [4,404]  # Current scratch fixture for the "largest number" exercise.
# Working buckets used by solution(): inputs grouped by digit count, later
# sorted ascending so .pop() always yields the largest remaining value.
one_num_list = []
two_num_list = []
three_num_list = []
def add_one_more_digit_and_find_max_more_line_1(str_to_compare, digit_status):
    """Try extending *str_to_compare* by one more number and pick the best.

    *digit_status* is an "a_b_c" string recording how many 1-, 2- and
    3-digit numbers remain in the module-level buckets.  Each non-empty
    bucket proposes its largest remaining value (the buckets are sorted
    ascending, so index count-1); empty buckets contribute the placeholder
    '0'.  compare_custom_digit() then chooses the winning candidate.

    Returns [winning bucket index (0/1/2), extended string, updated status].
    """
    temp_str = []
    temp_digital_list = []
    # Candidate 0: append the largest remaining 1-digit number.
    if (int(digit_status.split('_')[0]) >= 1):
        temp_str.append(str(one_num_list[int(digit_status.split('_')[0]) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0]) - 1)+ '_' + str(int(digit_status.split('_')[1]) )+ '_' + str(int(digit_status.split('_')[2]) )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    # Candidate 1: append the largest remaining 2-digit number.
    if (int(digit_status.split('_')[1]) >= 1):
        temp_str.append(str(two_num_list[int(digit_status.split('_')[1]) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0]))+ '_' + str(int(digit_status.split('_')[1]) -1 )+ '_' + str(int(digit_status.split('_')[2]) )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    # Candidate 2: append the largest remaining 3-digit number.
    if (int(digit_status.split('_')[2]) >= 1):
        temp_str.append(str(three_num_list[int(digit_status.split('_')[2]) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0]) )+ '_' + str(int(digit_status.split('_')[1]) )+ '_' + str(int(digit_status.split('_')[2])-1 )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    # Tournament: keep whichever candidate compare_custom_digit() prefers.
    temp_max = '0'
    temp_max_index = 0
    temp_max_digit_status = '0'
    temp_digit_status = '000'  # NOTE(review): assigned but never read again.
    for index in range(3):
        if (compare_custom_digit(temp_str[index], temp_max, temp_digital_list[index], temp_max_digit_status)):
            temp_max = temp_str[index]
            temp_max_digit_status = temp_digital_list[index]
            temp_max_index = index
    return [temp_max_index, str_to_compare + temp_str[temp_max_index], temp_max_digit_status]
def add_one_more_digit_and_find_max_more_line_2(str_to_compare, digit_status):
    """Like ..._line_1 but proposes two-step extensions as well.

    Candidates, in order: 1+1 digits, 1+2 digits, 1+3 digits, a single
    2-digit number, a single 3-digit number (see the "11,12, 13, 2, 3"
    comment below).

    NOTE(review): several branches use ``int(digit_status[0].split('_'))``
    / ``int(digit_status[1].split('_'))`` — that calls int() on a *list*
    and raises TypeError the moment the branch executes; elsewhere the
    working form ``int(digit_status.split('_')[0])`` is used.  Also,
    ``temp_max_index`` starts as the string '0' here (int 0 in line_1).
    """
    temp_str = []
    temp_digital_list = []
    # 11,12, 13, 2, 3
    if (int(digit_status.split('_')[0]) >= 2):
        # NOTE(review): second index below raises TypeError (int of a list).
        temp_str.append(str(one_num_list[int(digit_status.split('_')[0]) - 1]) + str(one_num_list[int(digit_status[0].split('_')) - 2]))
        temp_digit_status = str(int(digit_status.split('_')[0])-2 ) + '_' + str(int(digit_status.split('_')[1]) ) + '_' + str(int(digit_status.split('_')[2]) )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    if (int(digit_status.split('_')[0]) >= 1 and int(digit_status.split('_')[1]) >= 1):
        # NOTE(review): both indexing expressions raise TypeError (see above).
        temp_str.append(str(one_num_list[int(digit_status[0].split('_')) - 1]) + str(two_num_list[int(digit_status[1].split('_')) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0]) -1 ) + '_'+ str(int(digit_status.split('_')[1])-1 ) + '_' + str(int(digit_status.split('_')[2]) )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    if (int(digit_status.split('_')[0]) >= 1 and int(digit_status.split('_')[2]) >= 1):
        # NOTE(review): both indexing expressions raise TypeError (see above).
        temp_str.append(str(one_num_list[int(digit_status[0].split('_')) - 1]) + str(three_num_list[int(digit_status[1].split('_')) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0])-1 ) + '_'+ str(int(digit_status.split('_')[1]) ) + '_' + str(int(digit_status.split('_')[2])-1 )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    if (int(digit_status.split('_')[1]) >= 1):
        temp_str.append(str(two_num_list[int(digit_status.split('_')[1]) - 1]))
        # NOTE(review): decrements the 1-digit count too, although only a
        # 2-digit number was consumed — looks inconsistent with line_1.
        temp_digit_status = str(int(digit_status.split('_')[0])-1 ) + '_'+ str(int(digit_status.split('_')[1]) -1 ) + '_' + str(int(digit_status.split('_')[2]) )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    if (int(digit_status.split('_')[2]) >= 1):
        temp_str.append(str(three_num_list[int(digit_status.split('_')[2]) - 1]))
        temp_digit_status = str(int(digit_status.split('_')[0]) ) + '_'+ str(int(digit_status.split('_')[1]) ) + '_' + str(int(digit_status.split('_')[2])-1 )
        temp_digital_list.append(temp_digit_status)
    else:
        temp_str.append('0')
        temp_digital_list.append(digit_status)
    # Tournament over the five candidates, as in line_1 (but over range(5)).
    temp_max = '0'
    temp_max_index = '0'
    temp_max_digit_status = '0'
    temp_digit_status = '000'  # NOTE(review): assigned but never read again.
    for index in range(5):
        if (compare_custom_digit(temp_str[index], temp_max, temp_digital_list[index], temp_max_digit_status)):
            temp_max = temp_str[index]
            temp_max_digit_status = temp_digital_list[index]
            temp_max_index = index
    return [temp_max_index, str_to_compare + temp_str[temp_max_index], temp_max_digit_status]
def compare_custom_digit(left_num_list_str, right_num_list_str, left_digit_status, right_digit_status):
    """Return True when the left digit string should rank above the right one.

    Equal-length strings compare numerically (ties return False).  When the
    lengths differ, the common prefix is compared first; if the prefix ties,
    the shorter side is extended with its best still-available continuation
    (via the add_one_more_digit_* helpers, using its "a_b_c" bucket status)
    and the comparison recurses on the result.
    """
    if len(left_num_list_str) == len(right_num_list_str):
        # Same length: plain numeric comparison decides.
        if int(left_num_list_str) > int(right_num_list_str):
            return True
        else:
            return False
    elif len(left_num_list_str) > len(right_num_list_str):
        # Left is longer: compare its prefix against the whole right string.
        if int(left_num_list_str[0:len(right_num_list_str)]) > int(right_num_list_str):
            return True
        elif int(left_num_list_str[0:len(right_num_list_str)]) < int(right_num_list_str):
            return False
        else:
            # Prefix tie: extend the shorter (right) side by 1 or 2 digits
            # worth of candidates and recurse.
            if len(left_num_list_str) - len(right_num_list_str) == 1:
                max_list = add_one_more_digit_and_find_max_more_line_1(right_num_list_str, right_digit_status)
                if (compare_custom_digit(left_num_list_str, max_list[1], left_digit_status, max_list[2])):
                    return True
                else:
                    return False
            else :
                max_list = add_one_more_digit_and_find_max_more_line_2(right_num_list_str, right_digit_status)
                if (compare_custom_digit(left_num_list_str, max_list[1], left_digit_status, max_list[2])):
                    return True
                else:
                    return False
    elif len(left_num_list_str) < len(right_num_list_str):
        # Mirror case: right is longer, so extend the left side instead and
        # invert the recursive result.
        if int(right_num_list_str[0:len(left_num_list_str)]) > int(left_num_list_str):
            return False
        elif int(right_num_list_str[0:len(left_num_list_str)]) < int(left_num_list_str):
            return True
        else:
            if len(right_num_list_str) -len(left_num_list_str) ==1 :
                max_list = add_one_more_digit_and_find_max_more_line_1(left_num_list_str, left_digit_status)
                if (compare_custom_digit(right_num_list_str, max_list[1], right_digit_status, max_list[2])):
                    return False
                else:
                    return True
            else :
                max_list = add_one_more_digit_and_find_max_more_line_2(left_num_list_str, left_digit_status)
                if (compare_custom_digit(right_num_list_str, max_list[1], right_digit_status, max_list[2])):
                    return False
                else:
                    return True
    # NOTE(review): unreachable — every length relation above already returns.
    return True
def solution(numbers):
    """Return the largest number formed by concatenating *numbers*, as a string.

    Replaces the digit-count bucket greedy (which produced wrong orderings
    for some inputs and, if the tournament picked an empty bucket, risked
    never consuming anything on a loop iteration) with the standard pairwise
    ordering: a belongs before b exactly when int(a + b) >= int(b + a).

    Also generalizes the input range: any non-negative integers are handled,
    not just values below 10000.  All-zero input collapses to "0".
    """
    from functools import cmp_to_key

    digits = [str(n) for n in numbers]
    # Negative comparator result puts `a` first, so a leads when a+b > b+a.
    digits.sort(key=cmp_to_key(lambda a, b: int(b + a) - int(a + b)))
    # int() strips leading zeros, so e.g. [0, 0] yields "0" rather than "00".
    return str(int(''.join(digits)))
#print(solution(num))
# print(solution([6, 10, 2]),6210)
# print(solution([3, 30, 34, 5, 9]),9534330)
# print(solution([40,400]), 40400)
# print(solution([40,404]), 40440)
# print(solution([12,121]), 12121)
# print(solution([3054,305]), 3054305)
# print(solution([3044,304]), 3044304)
# print(solution([340,3403]), 3403403)
# print(solution([340,3402]), 3403402)
# print(solution([340,3405]), 3405340)
# print(solution([40,405]), 40540)
# print(solution([40,404]), 40440)
# print(solution([40,403]), 40403)
# print(solution([40,405]), 40540)
# print(solution([40,404]), 40440)
# print(solution([50,403]), 50403)
# print(solution([50,405]), 50405)
# print(solution([50,404]), 50404)
# print(solution([30,403]), 40330)
# print(solution([30,405]), 40530)
# print(solution([30,404]), 40430)
# print(solution([12,121]), 12121)
print(solution([2,22,223]), 223222)
#print(solution([1, 11, 111, 1111]),1111111111)
# print(solution([41,415]), 41541)
# print(solution([2,22 ]), 222)
# print(solution([70,0,0,0]), 70000)
# print(solution([0,0,0,1000]), 1000000)
# print(solution([0,0,0,0]),0)
# print(solution([0,0,70]),7000)
# print(solution([12,1213]), 121312)
# print(solution([3, 30, 34, 5, 91]),91534330)
# print(solution([3, 30, 34, 5, 191]),534330191)
# print(solution([3, 30, 34, 5, 191, 432789]),543278934330191)
# print(solution([1,2,3,4,5,44]),5444321)
# print(solution([1,2,3,4,5,66]),6654321)
# print(solution([3, 30, 31, 5, 9]),9533130)
# print(solution([3, 30, 31, 34, 5, 9]),953433130)
# print(solution([3, 30, 31, 34, 33, 5, 9]),95343333130)
# print(solution([10, 101]),10110)
# Edge case: all zeros must collapse to a single "0" (expected answer 0).
print(solution([0, 0, 0, 0, 0, 0]),0)
| UTF-8 | Python | false | false | 10,468 | py | 148 | programmers_level2 test_20201003.py | 146 | 0.575373 | 0.496561 | 0 | 250 | 40.872 | 164 |
shuchitagupta/videoAnnotation | 10,067,403,392,210 | d720d4f9181882ff84861083cd45e18f3e94fb18 | 782525773e95a01be692bb0842c6c616b56b3c38 | /VideoAnnotation/VideoAnnotation-master/new_format.py | a1b66b09c02f2163c371f77aa543126e34dae616 | []
| no_license | https://github.com/shuchitagupta/videoAnnotation | 08834e6f231197ea7ef90072f0f89adb1b3a0f62 | bd740a4a13eaa9cfdbbb61db450e78696388c50d | refs/heads/master | 2021-08-24T11:44:03.694421 | 2017-12-09T15:42:32 | 2017-12-09T15:42:32 | 113,680,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | f = open("./res2i2.csv")
# `f` (opened above) holds semicolon-separated ball-by-ball records of the
# form "<over>;<batsman>,<event>".  Re-emit them as CSV rows carrying a
# running total of runs scored.
lines = [raw.strip() for raw in f.readlines()]
f.close()

total_runs = 0
res_f = open("./final_res2i2.csv", "w")
for line in lines:
    over = line.split(";")[0]
    fields = line.split(";")[1].split(",")
    who = fields[0]
    event = fields[1].strip()

    # Translate the textual event into the number of runs it is worth.
    scored = 0
    if "1 run" in event or event == "1 wide" or event == "1 bye" or event == "1 leg bye":
        scored = 1
    elif event == "2 runs" or event == "2 byes":
        scored = 2
    elif event == "3 runs" or event == "3 byes":
        scored = 3
    elif "FOUR" in event:
        scored = 4
    elif "SIX" in event:
        scored = 6
    total_runs += scored

    res_f.write(",".join([over, who, event, str(total_runs)]) + "\n")
res_f.close()
hoka-sp/NLP100 | 9,311,489,105,787 | d7e5f8e880c1fdad6b80b90e5dddb27e6169e209 | 61c4c220a70a18d179ed77a6b02914045945a85d | /knock100/knock29.py | 6322a0921940a7441ea4d679646899fb9b92f225 | []
| no_license | https://github.com/hoka-sp/NLP100 | 5839adedc1b8d522d93859cdad07319fc8967679 | b206c4eb2a43e268fc68fd7b54bf12132389141c | refs/heads/master | 2023-07-13T15:19:44.076759 | 2021-08-09T15:06:47 | 2021-08-09T15:06:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import re
from package import func_wiki
import json
import urllib.parse, urllib.request
args = sys.argv
args.append('ch03/ch03/jawiki-country.json.gz')
def remove_markup_link(text_date):
    """Strip MediaWiki markup from a template field value.

    Removes, in this order: emphasis quotes, internal [[...]] links (keeping
    the display text), {{lang|...}} / {{仮リンク|...}} templates (keeping the
    last field), any remaining piped links, bracketed spans, external URLs
    and HTML tags.  The order matters: piped links must be unwrapped before
    the generic "[...]"-removal pass.
    """
    substitutions = (
        # '' .. ''''' emphasis markers
        (r'\'{2,5}', ''),
        # internal link: [[article|label]] or [[article]] -> label / article
        (r'\[\[(?:[^|]*?\|)??([^|]*?)\]\]', r'\1'),
        # {{lang|..|text}} / {{仮リンク|..|text}} templates -> text
        (r'\{\{(?:lang|仮リンク)??(?:[^|]*?\|)*?([^|]*?)\}\}', r'\1'),
        # any leftover piped link -> label
        (r'\[\[(?:.*?\|)([^|]*?)\]\]', r'\1'),
        # anything still wrapped in single brackets
        (r'\[.*?\]', ''),
        # bare external URLs
        (r'https?://[!?\-\.\w=&%\[\]/]+', ''),
        # HTML tags such as <ref> or <br />
        (r'<.+?>', ''),
    )
    for pattern, replacement in substitutions:
        text_date = re.sub(pattern, replacement, text_date)
    return text_date
def request_url(result):
    """Look up the direct URL of the country's flag image.

    `result` is the cleaned 基礎情報 field dict; its '国旗画像' entry names a
    file on Wikimedia Commons.  Queries the MediaWiki imageinfo API and
    returns the image URL as a string.
    """
    flag_file = result['国旗画像']
    api_url = (
        'https://www.mediawiki.org/w/api.php?'
        'action=query'
        '&titles=File:' + urllib.parse.quote(flag_file) +
        '&format=json'
        '&prop=imageinfo'
        '&iiprop=url'
    )

    request = urllib.request.Request(
        api_url,
        headers={'User-Agent': 'knock100(@hoka)'},
    )
    with urllib.request.urlopen(request) as connection:
        payload = json.loads(connection.read().decode())

    # Response shape: query -> pages -> {<page id>: {..., imageinfo: [{url: ...}]}}
    return payload['query']['pages'].popitem()[1]['imageinfo'][0]['url']
def main():
    """Extract the 基礎情報 template for イギリス, clean its fields, and
    print the resolved flag-image URL."""
    article = func_wiki.read_wiki(args[1], 'イギリス')

    # Pull out the body of the {{基礎情報 ...}} template.
    template_body = re.findall(
        r'^\{\{基礎情報.*?$(.*?)^\}\}', article,
        re.MULTILINE | re.DOTALL,
    )[0]

    # Split the template body into "| field = value" pairs.
    fields = dict(re.findall(
        r'^\|(.+?)\s*=\s*(.+?)(?:(?=\n\|)|(?=\n$))', template_body,
        re.MULTILINE | re.DOTALL,
    ))

    cleaned = {name: remove_markup_link(value) for name, value in fields.items()}
    print(request_url(cleaned))


if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 3,027 | py | 71 | knock29.py | 53 | 0.494991 | 0.48152 | 0 | 97 | 28.845361 | 118 |
Saraswitty/WittyMail | 17,282,948,434,727 | ece536c604da86eb3efa84b01d5cfa7cc5cb2c4e | 1caf4b37a1c8dabc76fbf9242b3db139cdea5e5b | /backend/ai/gender_guesser.py | 517e68c3fcd43fd6f46d51fe010a3cfca5a7d9c9 | [
"MIT"
]
| permissive | https://github.com/Saraswitty/WittyMail | 5c4c0969ecc6eae885b96964f21817ae506503ca | 5ffa875fc5ac86e535893b0f8f423f0763a4821d | refs/heads/master | 2020-04-04T19:48:32.921926 | 2019-03-07T13:59:26 | 2019-03-07T13:59:26 | 156,221,253 | 0 | 0 | MIT | false | 2019-05-01T17:07:55 | 2018-11-05T13:21:43 | 2019-03-10T14:40:05 | 2019-05-01T17:07:54 | 568 | 0 | 0 | 17 | TypeScript | false | false | import requests
# Locally curated given names with a known gender.  These provide a fast,
# offline lookup that guess_gender() consults before calling the Namsor API.
# All entries are lowercase; lookups happen after name.lower().
male_name_set = set([
    'ajay',
    'amit',
    'omkar',
    ])
# Known female given names (same convention as above).
female_name_set = set([
    'fatima',
    'anamika',
    'sonal'
    ])
def guess_gender(name, surname = 'nair'):
    """Guess the gender for an Indian first name.

    Resolution order:
      1. local known-name sets (male_name_set / female_name_set),
      2. the Namsor onomastics web API,
      3. a last-resort vowel heuristic (name ending in a vowel -> "female").

    Args:
        name: given name (any case).
        surname: family name used for the API lookup; defaults to 'nair'.

    Returns:
        "male" or "female".
    """
    name = name.lower()
    surname = surname.lower()
    if name in male_name_set:
        return "male"
    if name in female_name_set:
        return "female"

    # e.g. curl -i https://api.namsor.com/onomastics/api/json/gender/Ajinkya/Nair/ind
    url = 'https://api.namsor.com/onomastics/api/json/gender/' + name + '/' + surname + '/ind'
    try:
        # Bounded timeout so a slow API cannot hang the caller indefinitely.
        data = requests.get(url, timeout=5).json()
        gender = data['gender']
        if gender != "unknown":
            return gender
    except (requests.RequestException, ValueError, KeyError):
        # Network/API failure: fall back to the heuristic below instead of
        # crashing (previously any request error propagated to the caller).
        pass

    # Fallback heuristic: Indian female names commonly end in a vowel.
    vowels = "aeiou"
    if name[-1:] in vowels:
        return "female"
    return "male"
| UTF-8 | Python | false | false | 707 | py | 36 | gender_guesser.py | 22 | 0.636492 | 0.635078 | 0 | 37 | 18.108108 | 92 |
gdoumenc/pyinfra | 16,441,134,848,377 | 9ba79e7a0a3063377597e3fe585ddc0345737e00 | 85ef8b35cfc97e178266b794a1c5ad98ee3d4606 | /pyinfra_cli/main.py | e986bf0edf98e443f1dfcf45cf0aa6fd871caf22 | [
"MIT"
]
| permissive | https://github.com/gdoumenc/pyinfra | b251f26c1851cd29326ab3b7a2d12ee4ce649d1e | 03de54cc139f15dd6da9de3b7c2ccd578605af85 | refs/heads/develop | 2021-08-23T16:00:42.249931 | 2017-12-05T15:07:08 | 2017-12-05T15:07:08 | 112,345,687 | 0 | 0 | null | true | 2017-11-28T14:28:23 | 2017-11-28T14:28:23 | 2017-11-13T15:44:47 | 2017-11-24T17:06:52 | 15,225 | 0 | 0 | 0 | null | false | null | # pyinfra
# File: pyinfra_cli/main.py
# Desc: the actual CLI implementation
from __future__ import division, print_function
import logging
import sys
from os import getcwd, path
import click
from pyinfra import (
__version__,
logger,
pseudo_inventory,
pseudo_state,
)
from pyinfra.api import State
from pyinfra.api.attrs import FallbackAttrData
from pyinfra.api.connect import connect_all
from pyinfra.api.exceptions import PyinfraError
from pyinfra.api.facts import get_facts, is_fact
from pyinfra.api.operation import add_op
from pyinfra.api.operations import run_ops
from pyinfra.modules import server
from .config import load_config, load_deploy_config
from .exceptions import CliError
from .inventory import make_inventory
from .log import setup_logging
from .prints import (
dump_state,
print_facts,
print_facts_list,
print_inventory,
print_meta,
print_operations_list,
print_results,
)
from .util import (
get_operation_and_args,
load_deploy_file,
progress_spinner,
run_hook,
)
# Exit handler
def _exit():
print()
print('<-- Thank you, goodbye')
print()
sys.exit(0)
def _print_facts(ctx, param, value):
if not value:
return
print('--> Available facts:')
print_facts_list()
ctx.exit()
def _print_operations(ctx, param, value):
if not value:
return
print('--> Available operations:')
print_operations_list()
ctx.exit()
# NOTE: the docstring of `cli` below doubles as the CLI help text rendered by
# click, and every `help=` string is user-facing output - treat them as
# behaviour, not documentation.
@click.command()
@click.argument('inventory', nargs=1)
@click.argument('commands', nargs=-1, required=True)
@click.option(
    'verbosity', '-v',
    count=True,
    help='Print std[out|err] from operations/facts.',
)
# -- SSH connection options ----------------------------------------------
@click.option('--user', help='SSH user to connect as.')
@click.option('--port', type=int, help='SSH port to connect to.')
@click.option('--key', type=click.Path(), help='Private key filename.')
@click.option('--key-password', help='Privte key password.')
@click.option('--password', help='SSH password.')
# -- Privilege escalation ------------------------------------------------
@click.option(
    '--sudo', is_flag=True, default=False,
    help='Whether to execute operations with sudo.',
)
@click.option('--sudo-user', help='Which user to sudo when sudoing.')
@click.option('--su-user', help='Which user to su to.')
# -- Execution behaviour -------------------------------------------------
@click.option('--parallel', type=int, help='Number of operations to run in parallel.')
@click.option('--fail-percent', type=int, help='% of hosts allowed to fail.')
@click.option(
    '--dry', is_flag=True, default=False,
    help='Don\'t execute operations on the remote host.',
)
@click.option(
    '--limit',
    help='Limit the inventory, supports *wildcards and group names.',
)
@click.option(
    '--no-wait', is_flag=True, default=False,
    help='Don\'t wait between operations for hosts to complete.',
)
@click.option(
    '--serial', is_flag=True, default=False,
    help='Run operations in serial, host by host.',
)
# -- Debugging & introspection -------------------------------------------
@click.option(
    '--debug', is_flag=True, default=False,
    help='Print debug info.',
)
@click.option(
    '--debug-data', is_flag=True, default=False,
    help='Print host/group data before operations and exit.',
)
@click.option(
    '--debug-state', is_flag=True, default=False,
    help='Print state data before operations and exit.',
)
# These two are handled by eager callbacks above and exit before `cli` runs.
@click.option(
    '--facts', is_flag=True, is_eager=True, callback=_print_facts,
    help='Print available facts list and exit.',
)
@click.option(
    '--operations', is_flag=True, is_eager=True, callback=_print_operations,
    help='Print available operations list and exit.',
)
@click.version_option(
    version=__version__,
    prog_name='pyinfra',
    message='%(prog)s: v%(version)s',
)
def cli(*args, **kwargs):
    '''
    pyinfra manages the state of one or more servers. It can be used for
    app/service deployment, config management and ad-hoc command execution.
    Documentation: pyinfra.readthedocs.io
    # INVENTORY
    \b
    + a file (inventory.py)
    + hostname (host.net)
    + Comma separated hostnames:
        host-1.net,host-2.net,@local
    # COMMANDS
    \b
    # Run one or more deploys against the inventory
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    \b
    # Run a single operation against the inventory
    pyinfra INVENTORY server.user pyinfra,home=/home/pyinfra
    \b
    # Execute an arbitrary command on the inventory
    pyinfra INVENTORY exec -- echo "hello world"
    \b
    # Run one or more facts on the inventory
    pyinfra INVENTORY fact linux_distribution [users]...
    '''
    # All heavy lifting happens in _main(); main() only converts internal
    # pyinfra errors into click-friendly CliErrors.
    main(*args, **kwargs)
def main(*args, **kwargs):
    """Thin wrapper around _main that converts internal PyinfraErrors into
    CliErrors, which click knows how to render; ClickExceptions pass through
    unchanged."""
    try:
        _main(*args, **kwargs)

    except PyinfraError as error:
        if isinstance(error, click.ClickException):
            raise
        # Translate to a click-handled error, preserving the message.
        raise CliError(getattr(error, 'message', error.args[0]))
def _main(
    inventory, commands, verbosity,
    user, port, key, key_password, password,
    sudo, sudo_user, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    debug, debug_data, debug_state,
    facts=None, operations=None,
):
    '''
    Actual CLI implementation: works out the deploy directory and command
    mode ('fact', 'exec', 'deploy' or 'op'), loads config + inventory,
    connects to all hosts and then either prints facts or prepares and runs
    the requested operations.  Always terminates the process via _exit().
    `facts`/`operations` are consumed by eager click callbacks and unused here.
    '''
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()
    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)
    deploy_dir = getcwd()
    potential_deploy_dirs = []
    # This is the most common case: we have a deploy file so use it's
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if commands[0].endswith('.py'):
        deploy_file_dir, _ = path.split(commands[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)
        deploy_dir = deploy_file_dir
        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))
    # If we have a valid inventory, look in it's path and it's parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)
        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))
    # First candidate containing group_data/ or config.py wins.
    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))
        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break
    # List facts: parse "name" or "name:arg1,arg2" specs into (name, args)
    if commands[0] == 'fact':
        command = 'fact'
        fact_names = commands[1:]
        facts = []
        for name in fact_names:
            args = None
            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')
            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))
            facts.append((name, args))
        commands = facts
    # Execute a raw command with server.shell
    elif commands[0] == 'exec':
        command = 'exec'
        commands = commands[1:]
    # Deploy file(s)
    elif all(cmd.endswith('.py') for cmd in commands):
        command = 'deploy'
        commands = commands[0:]
        # Check each file exists
        for file in commands:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))
    # Operation w/optional args
    elif len(commands) == 2:
        command = 'op'
        commands = get_operation_and_args(
            commands[0], commands[1],
        )
    else:
        raise CliError('''Invalid commands: {0}
Command usage:
pyinfra INVENTORY deploy_web.py [deploy_db.py]...
pyinfra INVENTORY server.user pyinfra,home=/home/pyinfra
pyinfra INVENTORY exec -- echo "hello world"
pyinfra INVENTORY fact os [users]...'''.format(commands))
    print('--> Loading config...')
    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)
    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(commands[0], config)
    # Arg based config overrides (CLI flags beat file config)
    if sudo:
        config.SUDO = True
    if sudo_user:
        config.SUDO_USER = sudo_user
    if su_user:
        config.SU_USER = su_user
    if parallel:
        config.PARALLEL = parallel
    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent
    print('--> Loading inventory...')
    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        limit=limit,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        ssh_port=port,
    )
    # If --debug-data dump & exit
    if debug_data:
        print_inventory(inventory)
        _exit()
    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)
    # Create/set the state
    state = State(inventory, config)
    state.is_cli = True
    state.print_lines = True
    state.deploy_dir = deploy_dir
    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1
    state.print_output = print_output # -v
    state.print_fact_info = print_output # -v
    state.print_fact_output = print_fact_output # -vv
    # Attach to pseudo state
    pseudo_state.set(state)
    # Setup the data to be passed to config hooks
    hook_data = FallbackAttrData(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )
    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)
    # Connect to all the servers
    print('--> Connecting to hosts...')
    with progress_spinner(state.inventory) as progress:
        connect_all(state, progress=progress)
    # Run the before_facts hook if provided
    run_hook(state, 'before_facts', hook_data)
    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')
        # Print facts as we get them
        state.print_fact_info = True
        # Print fact output with -v
        state.print_fact_output = print_output
        fact_data = {}
        with progress_spinner(commands) as progress:
            for i, (name, args) in enumerate(commands):
                fact_data[name] = get_facts(
                    state, name,
                    args=args,
                )
                progress()
        print_facts(fact_data)
        _exit()
    # Prepare the deploy!
    #
    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True
        add_op(
            state, server.shell,
            ' '.join(commands),
        )
    # Deploy file(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')
        # Number of "steps" to make = number of files * number of hosts
        prepare_steps = len(commands) * len(state.inventory)
        with progress_spinner(prepare_steps) as progress:
            for filename in commands:
                load_deploy_file(state, filename, progress=progress)
                progress()
    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')
        op, args = commands
        add_op(
            state, op,
            *args[0], **args[1]
        )
    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state, inventory)
    # If --debug-state, dump state (ops, op order, op meta) now & exit
    if debug_state:
        dump_state(state)
        _exit()
    # Run the operations we generated with the deploy file
    if dry:
        _exit()
    print()
    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)
    print('--> Beginning operation run...')
    # Number of "steps" to make = number of operations * number of hosts
    operation_steps = len(state.op_order) * len(state.inventory)
    with progress_spinner(operation_steps) as progress:
        run_ops(
            state,
            serial=serial,
            no_wait=no_wait,
            progress=progress,
        )
    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)
    print('--> Results:')
    print_results(state, inventory)
    _exit()
| UTF-8 | Python | false | false | 12,790 | py | 96 | main.py | 43 | 0.614934 | 0.612744 | 0 | 481 | 25.590437 | 86 |
nesmeyannyshka/Final_Project | 1,090,921,724,345 | 45fb3db516323e0ff1dd2084ba472a7afc7f83c0 | b78a00f7de2ef0cff12d74bb3fa9ef5b177f5f08 | /final_project.py | 3f87b761f69337eec760056028f44ab6af5f4686 | []
| no_license | https://github.com/nesmeyannyshka/Final_Project | bb6b03e1b90a639de6b5d348558801c0c425590f | 8c792639fdc69b2195ee30daf94e2e6f4df0dbb0 | refs/heads/master | 2023-07-25T06:25:34.401636 | 2021-09-06T18:40:28 | 2021-09-06T18:40:28 | 399,170,556 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
from python_functions.funcs import api_to_bronze, api_to_silver, load_to_bronze_spark, load_to_silver_spark, dwh
# DAG-level defaults applied to every task.
default_args = {
    "owner": "airflow",
    "email_on_failure": False
}

dag = DAG(
    dag_id="final_project",
    description="Building Data Platform",
    schedule_interval="@daily",
    start_date=datetime(2021, 8, 22, 23, 59),
    default_args=default_args
)

# Ingest the external API feed into the bronze, then silver, layer.
api_bronze = PythonOperator(
    task_id='api_bronze',
    dag=dag,
    python_callable=api_to_bronze
)

api_silver = PythonOperator(
    task_id='api_silver',
    dag=dag,
    python_callable=api_to_silver
)

# Dummy tasks mark the stage boundaries of the pipeline.
dummy1 = DummyOperator(
    task_id='start_load_api',
    dag=dag
)

dummy2 = DummyOperator(
    task_id='start_load_tables',
    dag=dag
)

dummy3 = DummyOperator(
    task_id='start_load_to_dwh',
    dag=dag
)

# NOTE: this rebinds the imported `dwh` function name to the operator; the
# callable is captured before the rebind so behaviour is correct, but avoid
# reusing the name elsewhere in this module.
dwh = PythonOperator(
    task_id='dwh',
    dag=dag,
    python_callable=dwh
)

dummy4 = DummyOperator(
    task_id='finish_load_to_dwh',
    dag=dag
)

tables = ['orders', 'products', 'departments', 'aisles', 'clients', 'stores', 'store_types', 'location_areas']

for table in tables:
    load_to_bronze_group = PythonOperator(
        task_id="load_" + table + "_to_bronze",
        dag=dag,  # fix: was missing, leaving these tasks detached from the DAG
        python_callable=load_to_bronze_spark,
        op_kwargs={"table": table}
    )

    load_to_silver_group = PythonOperator(
        task_id="load_" + table + "_to_silver",
        dag=dag,  # fix: was missing, leaving these tasks detached from the DAG
        python_callable=load_to_silver_spark,
        op_kwargs={"table": table}
    )

    dummy2 >> load_to_bronze_group >> load_to_silver_group >> dummy3

# These dependencies do not vary per table, so declare them once outside the
# loop (previously they were re-declared on every iteration).
dummy1 >> api_bronze >> api_silver >> dummy3
dummy3 >> dwh >> dummy4
| UTF-8 | Python | false | false | 1,744 | py | 6 | final_project.py | 5 | 0.665711 | 0.65367 | 0 | 74 | 22.486486 | 112 |
aranjan99/Labs | 13,700,945,719,388 | d1c298270d0e4e97a297a239b41de1e370af91f8 | e7d2c55a0504d80a25797df8038f6cfe3ebe09bb | /lab10/planning.py | b9997fd8ad23ea93c3bc8363119b800950c4bc76 | []
| no_license | https://github.com/aranjan99/Labs | 3745ff64c41df5c020ea9f6aa15086f4de16d8ef | 33bc4efbfa5fbc94f18a583135b2c4cff014e317 | refs/heads/master | 2020-03-08T23:28:23.948816 | 2018-05-30T17:59:39 | 2018-05-30T17:59:39 | 128,465,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#author1:
#author2:
from grid import *
from visualizer import *
import threading
from queue import PriorityQueue
import math
import cozmo
from cozmo.util import degrees, distance_mm, speed_mmps
import asyncio
import time
def astar(grid, heuristic):
    """Perform the A* search algorithm on a defined grid

    Arguments:
        grid -- CozGrid instance to perform search on
        heuristic -- supplied heuristic function

    Returns:
        The start-to-goal path as a list of coordinates (also stored on the
        grid via setPath), or None when the goal is unreachable.
    """
    goal = grid.getGoals()[0]
    start = grid.getStart()

    # Best known f-score for coordinates still waiting in the frontier.
    best_f = {start: heuristic(start, goal)}
    # Frontier entries: (f-score, -g accumulated cost, (coord, step_cost, parent_entry)).
    # The second element stores the *negative* accumulated cost, so
    # subtracting it in the f computation below adds the path cost so far.
    frontier = PriorityQueue()
    frontier.put((heuristic(start, goal), 0, (start, 0, None)))

    while frontier.qsize() > 0:
        current = frontier.get()
        if current[2][0] == goal:
            # Reconstruct the path by walking parent links back to the start.
            node = current[2]
            path = []
            while node:
                path.append(node[0])
                node = node[2]
            # Bug fix: reverse in place *before* returning; the original did
            # `return finalPath.reverse()`, which always returned None.
            path.reverse()
            grid.setPath(path)
            return path

        best_f.pop(current[2][0], None)
        grid.addVisited(current[2][0])

        for neighbor in grid.getNeighbors(current[2][0]):
            coord, step_cost = neighbor[0], neighbor[1]
            # f(neighbor) = h(neighbor) + step cost + g(current).
            f_score = heuristic(coord, goal) + step_cost - current[1]
            # Bug fix: compare the coordinate (not the (coord, cost) pair)
            # against the visited set; the original check could never match,
            # allowing already-expanded cells back onto the frontier.
            if coord not in grid.getVisited() and (coord not in best_f or best_f[coord] > f_score):
                frontier.put((f_score, current[1] - step_cost, (coord, step_cost, current[2])))
                best_f[coord] = f_score

    # Frontier exhausted without reaching the goal.
    return None
def heuristic(current, goal):
    """Straight-line (Euclidean) distance between two grid cells.

    Arguments:
        current -- (x, y) cell to evaluate
        goal -- (x, y) desired goal cell
    """
    dx = goal[0] - current[0]
    dy = goal[1] - current[1]
    return math.sqrt(dy * dy + dx * dx)
def cozmoBehavior(robot: cozmo.robot.Robot):
    """Cozmo search behavior. See assignment description for details

    Has global access to grid, a CozGrid instance created by the main thread, and
    stopevent, a threading.Event instance used to signal when the main thread has stopped.
    You can use stopevent.is_set() to check its status or stopevent.wait() to wait for the
    main thread to finish.

    Loop outline: observe cubes, mark a 3x3 obstacle footprint around each
    new cube, treat cube #1 as the goal (offset by getGoalCoord), re-plan
    with astar when the world changes, and drive the path one cell at a time.

    Arguments:
        robot -- cozmo.robot.Robot instance, supplied by cozmo.run_program
    """
    global grid, stopevent
    cubes = []
    start = grid.getStart()
    goal = False
    updatePath = False
    path = []
    i = 1  # index of the next path cell to drive to
    while not stopevent.is_set():
        try:
            cube = robot.world.wait_for_observed_light_cube(timeout=2)
            if cube not in cubes:
                updatePath = True
                cubes.append(cube)
                # Block out a 3x3 footprint of grid cells around the cube.
                # NOTE(review): world mm -> grid cell conversion assumes the
                # robot started at `start` with pose (0, 0) - confirm.
                objects = []
                for i in range(-1, 2, 1):
                    for j in range(-1, 2, 1):
                        objects.append((int(cube.pose.position.x/grid.scale + start[0] + 1) + i, int(cube.pose.position.y/grid.scale + start[1] + 1) + j))
                grid.addObstacles(objects);
                # If an obstacle landed on the current goal, nudge the goal.
                intersect = [list(filter(lambda x: x in objects, sublist)) for sublist in grid.getGoals()]
                if(intersect):
                    updateGoal = grid.getGoals()[0]
                    grid.clearGoals()
                    grid.addGoal((updateGoal[0]+1, updateGoal[1]+1))
                # Cube 1 is the target: place the goal next to it, offset by
                # the cube's heading so the robot approaches a free face.
                if cube.object_id == robot.world.light_cubes[cozmo.objects.LightCube1Id].object_id:
                    grid.clearGoals()
                    r1,r2 = getGoalCoord(cube.pose.rotation.angle_z.degrees)
                    grid.addGoal((int(cube.pose.position.x/grid.scale + start[0] + 1)+r1, int(cube.pose.position.y/grid.scale + start[1] + 1)+r2))
                    goal = True
            else:
                updatePath = False
                # No new information and no goal yet: keep scanning in place.
                if goal == False:
                    robot.turn_in_place(degrees(30)).wait_for_completed()
        except asyncio.TimeoutError:
            # Saw nothing this cycle; keep scanning until the goal is known.
            if goal == False:
                robot.turn_in_place(degrees(30)).wait_for_completed()
        if goal == True:
            if updatePath == True:
                # World changed: re-plan from the robot's current cell.
                grid.clearStart()
                grid.setStart((int(robot.pose.position.x/grid.scale + start[0] + 0.5), int(robot.pose.position.y/grid.scale + start[1] + 0.5)))
                astar(grid, heuristic)
                path = grid.getPath()
                print(path)
                i = 1
            if(i < len(path)):
                # Turn toward the next cell, then drive one cell length.
                print(path[i])
                curr_angle = robot.pose.rotation.angle_z
                angle = rotateAngle((path[i][0] - path[i-1][0], path[i][1] - path[i-1][1]))
                angle_to_turn = angle - curr_angle.degrees
                print(angle_to_turn)
                robot.turn_in_place(degrees(angle_to_turn)).wait_for_completed()
                # NOTE(review): distance uses path[1] rather than path[i] -
                # presumably relies on re-planning after each move; verify.
                d = math.sqrt(math.pow(path[1][0] - grid.getStart()[0], 2) + math.pow(path[1][1] - grid.getStart()[1], 2))
                print(d*grid.scale)
                robot.drive_straight(distance_mm(d*grid.scale), speed_mmps(20)).wait_for_completed()
                i=i+1
            if(i == len(path)):
                # Reached the end of the planned path: signal completion.
                stopevent.set()
def rotateAngle(cord):
    """Map a unit grid step (dx, dy) to its absolute heading in degrees.

    The mapping follows atan2(dy, dx): (1, 0) -> 0, (0, 1) -> 90,
    (-1, 0) -> 180, (0, -1) -> -90, diagonals at +/-45 and +/-135.
    Returns 0 for any vector that is not a unit step (preserving the
    original fallback behaviour).

    Fixes two bugs in the previous lookup table: (1, -1) was missing
    entirely (it fell through to the 0 default) and (-1, -1) appeared twice,
    with the first occurrence wrongly paired with -45 instead of -135.
    """
    headings = {
        (1, 0): 0,
        (1, 1): 45,
        (0, 1): 90,
        (-1, 1): 135,
        (-1, 0): 180,
        (1, -1): -45,
        (0, -1): -90,
        (-1, -1): -135,
    }
    return headings.get(cord, 0)
def getGoalCoord(angle):
    """Choose the (dx, dy) goal offset from a cube's heading in degrees.

    The circle is split into four 90-degree sectors centred on the cardinal
    directions; each sector maps to a 2-cell offset so the robot approaches
    the cube from an open side.  Returns (0, 0) if no sector matches.
    """
    if -22.5 <= angle < 67.5:
        return -2, -2
    if 67.5 <= angle < 157.5:
        return 2, -2
    if angle > 157.5 or angle <= -157.5:
        return -2, 2
    if -157.5 < angle <= -22.5:
        return 2, 2
    return 0, 0
######################## DO NOT MODIFY CODE BELOW THIS LINE ####################################
class RobotThread(threading.Thread):
    """Daemon thread that runs the cozmo control program separately from the
    main (visualizer) thread."""

    def __init__(self):
        super().__init__(daemon=True)

    def run(self):
        # Blocks for the lifetime of the cozmo program.
        cozmo.run_program(cozmoBehavior)
# If run as executable, start RobotThread and launch visualizer with empty grid file
if __name__ == "__main__":
    # `global` is a no-op at module scope; kept for symmetry with
    # cozmoBehavior, which reads these two names as globals.
    global grid, stopevent
    # Event used to tell the robot thread that the main thread has finished.
    stopevent = threading.Event()
    grid = CozGrid("emptygrid.json")
    visualizer = Visualizer(grid)
    # Background thread that keeps the visualizer display updated.
    updater = UpdateThread(visualizer)
    updater.start()
    # Daemon thread running the cozmo search behavior.
    robot = RobotThread()
    robot.start()
    # Blocks until the visualizer window is closed.
    visualizer.start()
    stopevent.set()
| UTF-8 | Python | false | false | 6,829 | py | 9 | planning.py | 8 | 0.537121 | 0.512813 | 0 | 172 | 38.69186 | 158 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.