| column | dtype | values |
|---|---|---|
| repo_name | stringlengths | 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | stringlengths | 40 – 40 |
| directory_id | stringlengths | 40 – 40 |
| path | stringlengths | 5 – 151 |
| content_id | stringlengths | 40 – 40 |
| detected_licenses | list | |
| license_type | stringclasses | 2 values |
| repo_url | stringlengths | 26 – 130 |
| snapshot_id | stringlengths | 40 – 40 |
| revision_id | stringlengths | 40 – 40 |
| branch_name | stringlengths | 4 – 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k – 687M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 12 values |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 – 10.2M ⌀ |
| gha_stargazers_count | int32 | 0 – 178k ⌀ |
| gha_forks_count | int32 | 0 – 88.9k ⌀ |
| gha_open_issues_count | int32 | 0 – 2.72k ⌀ |
| gha_language | stringlengths | 1 – 16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | stringlengths | 10 – 2.95M |
| src_encoding | stringclasses | 5 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | stringclasses | 19 values |
| num_repo_files | int64 | 1 – 202k |
| filename | stringlengths | 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |
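Each record below follows this schema: the metadata fields appear pipe-separated in the column order above, with the `content` field expanded inline (⌀ appears to mark nullable columns). As a minimal sketch of how a corpus with this schema is typically loaded and filtered with the Hugging Face `datasets` library; the dataset path used here is a hypothetical placeholder, not this corpus's real identifier:

```python
from datasets import load_dataset

# Hypothetical dataset id -- substitute the real path of this corpus.
ds = load_dataset("example/python-code-corpus", split="train", streaming=True)

# Keep small, permissively licensed, non-vendored, hand-written Python files,
# using only columns listed in the schema above.
keep = (
    row for row in ds
    if row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
    and row["length_bytes"] < 10_000
)

for row in keep:
    print(row["repo_name"], row["path"], row["num_lines"])
    break  # inspect just the first matching record
```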
naveenv20/myseleniumpythonlearn | 4,226,247,834,618 | b32ec313f43ad7071085691021ac0f1691563e5b | 70d9f2ec1f44b124962fa1895532084238c63a2a | /ControlFlow/forloop.py | 8158c49eb495ddc2497c5bbcd839452b334f46cd | []
| no_license | https://github.com/naveenv20/myseleniumpythonlearn | eee6f1eefdcab96de95060ab13888f5435eb1cbc | e30203e975263be07f3f2a4207c55e680cce9040 | refs/heads/master | 2020-12-27T00:43:42.004206 | 2020-02-03T20:50:14 | 2020-02-03T20:50:14 | 237,708,854 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
For Loop
and looping through strings,list,tuple,dictionary
"""
my_string="abcdefghijklmno"
for c in my_string:
if(c=="g"):
print('G')
else:
        print(c, end=" ") ## not asking to print on a new line, so using end=" " prints a space instead
cars=['bmw','honda','benz']
for car in cars:
print(car)
nums=(1,2,3,4,5,6)
for num in nums:
print(num*10, end=" ")
dict={'name':'Hari','age': 33,'complex':'fair'}
for k in dict:
    print(k) ## by default the keys are printed
print(dict[k])
for k,v in dict.items():
print(k,v)
print("^^"*20)
## Zipping function
l1=[1,2,3,4]
l2=[22,33,44,55,66,77,88,99]
for a,b in zip(l1,l2): ## stops at the shorter list; here l1 has 4 items, so both print 4 times and then stop
print(a," ", b)
| UTF-8 | Python | false | false | 783 | py | 22 | forloop.py | 20 | 0.605364 | 0.555556 | 0 | 37 | 20.081081 | 119 |
simon-ritchie/apyscript | 6,717,328,892,939 | e5ceeced97a4351d5f0f501c346e8cd02b8ed4fa | 175bf8bae6f380e2134fe7332d5ee4cfca756c0a | /tests/_time/test_month_end_mixin.py | 65bc39a04f79290e64439e6d6acad8a0be22b401 | [
"MIT",
"CC-BY-4.0"
]
| permissive | https://github.com/simon-ritchie/apyscript | 7fb8a4b7bf75a5189127b59b78f55e4185918c54 | 6e3f2881f40deeb5409e93cf0a8971819845e689 | refs/heads/main | 2023-03-19T12:38:17.113129 | 2023-03-18T02:35:36 | 2023-03-18T02:35:36 | 334,394,290 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import apysc as ap
from apysc._expression import expression_data_util
from apysc._testing.testing_helper import apply_test_settings
from apysc._time.month_end_mixin import MonthEndMixin
class TestMonthEndMixin:
@apply_test_settings()
def test_set_month_end(self) -> None:
expression_data_util.empty_expression()
mixin: MonthEndMixin = MonthEndMixin()
mixin.variable_name = "test_mixin"
mixin._year = ap.Int(2022)
mixin._month = ap.Int(12)
mixin._day = ap.Int(5)
mixin.set_month_end()
assert mixin._day == 31
expression: str = expression_data_util.get_current_expression()
expected: str = (
f"{mixin.variable_name}.setDate(1);"
f"\n{mixin.variable_name}.setMonth({mixin.variable_name}.getMonth() + 1);"
f"\n{mixin.variable_name}.setDate(0);"
)
assert expected in expression
| UTF-8 | Python | false | false | 941 | py | 1,701 | test_month_end_mixin.py | 889 | 0.621679 | 0.608927 | 0 | 25 | 35.64 | 86 |
markanethio/gretel-python-client | 18,459,769,448,599 | 82b6b2f28abdc3b278fc321537214f598a741883 | 6ca10769948e2eb678f24f37500e5df5aa27b02a | /tests/src/gretel_client/unit/transformers/transformers/test_bucket.py | 7785c76a00790f5f3459d8d8aca96c4d7b4bcd83 | [
"Python-2.0",
"Apache-2.0"
]
| permissive | https://github.com/markanethio/gretel-python-client | cc04a61ee04b674be4293baa89589b95c3c86843 | f910a64550fd6fba75f6d347f2a1251694dbde80 | refs/heads/master | 2023-06-25T01:36:54.985590 | 2021-06-17T16:13:06 | 2021-06-17T16:13:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gretel_client.transformers.base import factory
from gretel_client.transformers import DataTransformPipeline, DataPath
from gretel_client.transformers.transformers.bucket import (
BucketConfig,
bucket_creation_params_to_list,
get_bucket_labels_from_creation_params,
Bucket,
BucketCreationParams,
)
def test_bucket(safecast_test_bucket2):
bucket_list = [
Bucket(20.0, 23.0, "Low"),
Bucket(23.0, 24.0, "Med"),
Bucket(24.0, 25.0, "High"),
]
bucket_config = BucketConfig(buckets=bucket_list)
data_paths = [
DataPath(input="payload.env_temp", xforms=bucket_config),
DataPath(input="*"),
]
xf = DataTransformPipeline(data_paths)
recs = []
for rec in safecast_test_bucket2.get("data", {}).get("records"):
recs.append(dict(xf.transform_record(rec.get("data"))))
assert recs[0]["payload.env_temp"] == "Low"
assert recs[4]["payload.env_temp"] == "Med"
assert recs[7]["payload.env_temp"] == "High"
def test_string_bucket():
bucket_list = [Bucket("a", "l", "a-l"), Bucket("m", "s", "m-s")]
xf = factory(
BucketConfig(
buckets=bucket_list, labels=["person_name"], upper_outlier_label="t-z"
)
)
_, check = xf.transform_entity("person_name", "myers")
assert check == "m-s"
_, check = xf.transform_entity("person_name", "ehrath")
assert check == "a-l"
def test_type_mismatch():
bucket_list = [Bucket("a", "l", "a-l"), Bucket("m", "s", "m-s")]
xf = factory(
BucketConfig(
buckets=bucket_list, labels=["person_name"], upper_outlier_label="t-z"
)
)
assert (None, 123) == xf.transform_entity("person_name", 123)
def test_bucket2(safecast_test_bucket2):
bucket_list = [
Bucket(22.0, 23.0, "FEET_0"),
Bucket(23.0, 24.0, "FEET_1"),
Bucket(24.0, 25.0, "FEET_2"),
]
bucket_config = [
BucketConfig(
buckets=bucket_list, lower_outlier_label="YEET", upper_outlier_label="WOOT"
)
]
data_paths = [
DataPath(input="payload.env_temp", xforms=bucket_config),
DataPath(input="*"),
]
xf = DataTransformPipeline(data_paths)
recs = []
for rec in safecast_test_bucket2.get("data", {}).get("records"):
recs.append(xf.transform_record(rec.get("data")).get("payload.env_temp"))
assert recs == [
"YEET",
None,
None,
None,
"FEET_1",
None,
None,
"WOOT",
None,
None,
None,
]
bucket_list = [
Bucket(21.0, 22.0, "nice"),
Bucket(22.0, 23.0, "bearable"),
Bucket(23.0, 24.0, "toasty"),
Bucket(24.0, 25.0, "volcano"),
Bucket(25.0, 26.0, "nuke"),
]
bucket_config = BucketConfig(buckets=bucket_list)
data_paths = [
DataPath(input="payload.env_temp", xforms=bucket_config),
DataPath(input="*"),
]
xf = DataTransformPipeline(data_paths)
recs = []
for rec in safecast_test_bucket2.get("data", {}).get("records"):
recs.append(xf.transform_record(rec.get("data")).get("payload.env_temp"))
assert recs == [
"nice",
None,
None,
None,
"toasty",
None,
None,
"nuke",
None,
None,
None,
]
def test_config_helpers():
buckets = bucket_creation_params_to_list(
BucketCreationParams(0.0, 10.0, 2.5), label_method="avg"
)
bucket_labels = get_bucket_labels_from_creation_params(
BucketCreationParams(0.0, 10.0, 2.5), label_method="avg"
)
bucket_vals = [0.0, 2.5, 5.0, 7.5, 10.0]
bucket_label_vals = [1.25, 3.75, 6.25, 8.75]
for idx in range(len(buckets)):
assert abs(buckets[idx].min - bucket_vals[idx]) < 0.01
for idx in range(len(bucket_labels)):
assert abs(bucket_labels[idx] - bucket_label_vals[idx]) < 0.01
assert len(buckets) == 4
assert len(bucket_labels) == 4
buckets = bucket_creation_params_to_list(
BucketCreationParams(0.0, 10.0, 2.8), label_method="avg"
)
bucket_labels = get_bucket_labels_from_creation_params(
BucketCreationParams(0.0, 10.0, 2.8), label_method="avg"
)
bucket_vals = [0.0, 2.8, 5.6, 8.4, 10.0]
bucket_label_vals = [1.4, 4.2, 7.0, 9.8]
for idx in range(len(buckets)):
assert abs(buckets[idx].min - bucket_vals[idx]) < 0.01
for idx in range(len(bucket_labels)):
assert abs(bucket_labels[idx] - bucket_label_vals[idx]) < 0.01
assert len(buckets) == 4
assert len(bucket_labels) == 4
def test_type_error():
tup = BucketCreationParams(0.0, 1.0, 0.5)
buckets = bucket_creation_params_to_list(tup)
paths = [DataPath(input="foo", xforms=BucketConfig(buckets=buckets))]
pipe = DataTransformPipeline(paths)
r = {"foo": "bar"}
# String throws a TypeError. We catch it and return original record.
assert r == pipe.transform_record(r)
def test_bucketing():
tup = BucketCreationParams(0.0, 1.0, 0.5)
buckets = bucket_creation_params_to_list(tup, label_method="avg")
paths = [
DataPath(
input="foo",
xforms=BucketConfig(
buckets=buckets, lower_outlier_label=0.0, upper_outlier_label=1.0
),
)
]
pipe = DataTransformPipeline(paths)
r = [{"foo": "bar"}, {"foo": -1}, {"foo": 0.1}, {"foo": 0.9}, {"foo": 1.1}]
out = [pipe.transform_record(rec) for rec in r]
assert out == [
{"foo": "bar"},
{"foo": 0.0},
{"foo": 0.25},
{"foo": 0.75},
{"foo": 1.0},
]
| UTF-8 | Python | false | false | 5,642 | py | 131 | test_bucket.py | 90 | 0.57072 | 0.534562 | 0 | 184 | 29.663043 | 87 |
wkostuch/comp-phys-niceties | 2,662,879,754,934 | f48e07cfa1ba15ada80898e965338a3fb33e7b7b | e5875a54ebbbc795b19803a8c09fa4e9838431ca | /runge_kutta_4_example.py | 4e9dd58ff3197e9173cfe06ff314aacb86266e09 | []
| no_license | https://github.com/wkostuch/comp-phys-niceties | 89fb2bfffb1a611d08a2fa6af5cda0bfeba9a895 | ed5ff0c7e03668041eacf13008bee66796e428d0 | refs/heads/master | 2023-01-24T15:58:02.553065 | 2020-11-21T19:47:02 | 2020-11-21T19:47:02 | 295,499,521 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Runge-Kutta 4 example
import numpy as np
import matplotlib.pyplot as plt
'''
Application of the RK4 algorithm
Want to solve dx/dt = 1 - t*sin(x) with initial condition x = 0 at t = 0
Up to t = 10
'''
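# For reference, the classic RK4 update implemented below:
#   k1 = h*f(x_n, t_n)
#   k2 = h*f(x_n + k1/2, t_n + h/2)
#   k3 = h*f(x_n + k2/2, t_n + h/2)
#   k4 = h*f(x_n + k3,   t_n + h)
#   x_{n+1} = x_n + (k1 + 2*k2 + 2*k3 + k4) / 6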
def der(x: float, t: float) -> float:
"""Derivative function from above."""
return 1 - t * np.sin(x)
# Initial conditions
t0, x0 = 0, 0
h = 0.01
# Create arrays for storing computed values
tc = np.arange(0, 10, h)
xc = []
x = x0
for index,t in enumerate(tc):
k1 = der(x, t) * h
k2 = der(x + k1/2, t + h/2) * h
k3 = der(x + k2/2, t + h/2) * h
    k4 = der(x + k3, t + h) * h
x += (k1 + 2*k2 + 2*k3 + k4) / 6
xc.append(x)
plt.plot(tc, xc)
plt.xlabel("Time (s)")
plt.ylabel("x(t)")
plt.show()
| UTF-8 | Python | false | false | 729 | py | 34 | runge_kutta_4_example.py | 31 | 0.57476 | 0.524005 | 0 | 36 | 19.25 | 72 |
renmengye/tfplus | 4,140,348,521,819 | a059e16a8fe459f7595fcf8a9cc9f3a132f0299a | cf4c919ff3d1f7f9f5390497f0346f0bc859b1a5 | /tfplus/__init__.py | 83b6247d84475ff88d694e2188bcb1891a7d85df | [
"MIT"
]
| permissive | https://github.com/renmengye/tfplus | 692d0b74e2b25fc352d9c3597a3d5dcb09e86ba8 | 9b1f71d33f24af510c701c7d6f6bb816720ed701 | refs/heads/master | 2020-04-05T02:15:39.628949 | 2016-11-21T19:30:25 | 2016-11-21T19:30:25 | 62,580,849 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from expr import experiment, runner
import data
import nn
from utils import cmd_args
from utils.cmd_args import init
| UTF-8 | Python | false | false | 117 | py | 58 | __init__.py | 57 | 0.820513 | 0.820513 | 0 | 5 | 22.4 | 35 |
avitalRubin/python-class | 11,416,023,087,543 | c005d4b7916f3a6b9e446b656fa73dc9401e4fdd | 8103968238017b637117712816d6fe9c87c289b1 | /sequence_operators.py | 401a2c91961ffd05ec99ad1f6e7bb9a07ce24ee9 | []
| no_license | https://github.com/avitalRubin/python-class | 8481a1a098cd9d5f5671e3aa2b25c990d137a9e9 | 96001a558a9ac2b13d6d5a16f3130c2b6322196c | refs/heads/master | 2020-12-12T11:01:29.739388 | 2020-01-15T19:50:47 | 2020-01-15T19:50:47 | 234,112,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | str1="he's"
str2="probably"
str3="pining"
print(str1+" "+ str2+" "+str3);
print (str1*5);
age=24
print("my age is {0} years old".format(age));
m1="spam"+"eggs"+"beans";
m2="""spam
eggs
beans"""
print(m1)
print(m2)
days = "Mon, Tue, Wed, Thu, Fri, Sat, Sun"
print(days[::5])
data = "1:A, 2:B, 3:C, 4:D, 5:E, 6:F, 7:G, 8:F"
print(data[::5])
print(data[1:5])
print(data[0:-1:5])
print(data[:-1:5]) | UTF-8 | Python | false | false | 390 | py | 9 | sequence_operators.py | 8 | 0.6 | 0.517949 | 0 | 20 | 18.55 | 45 |
DLwbm123/generalized-variational-continual-learning | 5,686,536,700,018 | 1272ab879c27b4766f03c2fd75ea9fbbc06b4c8d | 9534d5f23354d7670d61d5beb0c352986df2c785 | /src/best_hyperparams.py | 17307a064d8dcb91f774b8dffd4fe217e6410baa | []
| no_license | https://github.com/DLwbm123/generalized-variational-continual-learning | e3d0ae48a3f02028b3b1d7375a547fb733f1bef8 | 659b88161db0b1d884a7ebcdf477dbef3ad95add | refs/heads/master | 2023-04-16T07:13:05.120551 | 2021-05-03T18:40:10 | 2021-05-03T18:40:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def get_best_params(approach, experiment):
#the best hyperparams for all the experiments
param = None
epochs = 200
lr = 0.05
if experiment == 'mixture':
if approach in ['ewc', 'ewc-film']:
param = 5
if approach == 'hat':
param = '0.75,400'
if approach == 'imm-mean':
param = 0.0001
if approach == 'imm-mode':
param = '1'
if approach == 'lfl':
param = 0.05
if approach == 'lwf':
param = '2,1'
if approach == 'pathnet':
param = 20
if approach == 'gvclf':
param = '0.2,100'
if approach == 'gvcl':
param = '0.2, 1000'
if approach == 'vcl':
param = '1,1'
if 'vcl' in approach:
epochs = 180
lr = 1e-4
elif experiment == 'cifar':
if approach in ['ewc', 'ewc-film']:
param = 100
if approach == 'hat':
param = '0.025,50'
if approach == 'imm-mean':
param = 0.0001
if approach == 'imm-mode':
param = '1e-5'
if approach == 'lfl':
param = 0.05
if approach == 'lwf':
param = '2,4'
if approach == 'pathnet':
param = 100
if approach == 'gvclf':
param = '0.2,100'
if approach == 'gvcl':
param = '0.2,1000'
if approach == 'vcl':
param = '1,1'
if 'vcl' in approach:
epochs = 60
lr = 1e-3
elif experiment == 'easy-chasy':
epochs = 1000
if approach in ['ewc', 'ewc-film']:
param = 100
if approach == 'hat':
param = '1,10'
if approach == 'imm-mean':
param = 0.0005
if approach == 'imm-mode':
param = '1e-7'
if approach == 'lfl':
param = 0.1
if approach == 'lwf':
param = '0.5,4'
if approach == 'pathnet':
param = 20
if approach == 'gvclf':
param = '0.05,10'
if approach == 'gvcl':
param = '0.05,100'
if approach == 'vcl':
param = '1,1'
if 'vcl' in approach:
epochs = 1500
lr = 1e-3
elif experiment == 'hard-chasy':
epochs = 1000
if approach in ['ewc', 'ewc-film']:
param = 500
if approach == 'hat':
param = '1,50'
if approach == 'imm-mean':
param = '1e-6'
if approach == 'imm-mode':
param = '0.1'
if approach == 'lfl':
param = 0.1
if approach == 'lwf':
param = '0.5,2'
if approach == 'pathnet':
param = 200
if approach == 'gvclf':
param = '0.05,10'
if approach == 'gvcl':
param = '0.05,100'
if approach == 'vcl':
param = '1,1'
if 'vcl' in approach:
epochs = 1500
lr = 1e-3
elif experiment == 'smnist':
if approach in ['ewc', 'ewc-film']:
# param = 1 #10000
param = 10000
if approach == 'hat':
param = '0.1,50'
if approach == 'imm-mean':
param = 0.0005
if approach == 'imm-mode':
param = '0.1'
if approach == 'lfl':
param = 0.1
if approach == 'lwf':
param = '2,4'
if approach == 'pathnet':
param = 10
if approach == 'gvclf':
param = '0.1,100'
if approach == 'gvcl':
param = '0.1,1'
if approach == 'vcl':
param = '1,1'
if 'vcl' in approach:
epochs = 100
lr = 1e-3
return param, lr, epochs
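# Example, traced from the tables above:
#   get_best_params('gvcl', 'cifar') returns ('0.2,1000', 0.001, 60),
#   since 'vcl' in 'gvcl' triggers the lowered lr/epochs branch for cifar.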
| UTF-8 | Python | false | false | 3,816 | py | 22 | best_hyperparams.py | 19 | 0.415094 | 0.359539 | 0 | 144 | 25.5 | 49 |
MisLink/Little-Projects | 6,081,673,724,653 | ccf76a9923530e56fbde4778a26bdaa0e08e29ef | fd4bb91ef4eed298f952ff9a80f519dbeddc5702 | /Text/ex8_realtime_quotes/forms.py | a77329992bec62de648f027d8e21aeadb5968b52 | []
| no_license | https://github.com/MisLink/Little-Projects | 1ed2210077a6848f6f5c90b26eb44f4d09f5936b | 013d76c23002ee9719f532c0dc703255cfd734ea | refs/heads/master | 2016-08-12T16:53:32.456855 | 2016-05-13T07:44:02 | 2016-05-13T07:44:02 | 54,758,919 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask.ext.wtf import Form
from wtforms import StringField, SelectField, SubmitField, IntegerField
class InputForm(Form):
    type = SelectField('Stock type', choices=[('sh', 'SSE Composite Index'), ('sz', 'SZSE Component Index')])
    id = StringField('Stock code')
    refresh = IntegerField('Refresh interval')
    submit = SubmitField('Submit')
| UTF-8 | Python | false | false | 340 | py | 10 | forms.py | 8 | 0.682432 | 0.682432 | 0 | 9 | 31.888889 | 72 |
30s/gwwx | 2,980,707,308,829 | 3ae83c44646b8fddbf8a083c36fdc334a417ce78 | 3deac733ed5a405be6b3d0e574659355aacce518 | /gwwx/views.py | c9f5f57ce1211fee21df491e6513105b17566bec | []
| no_license | https://github.com/30s/gwwx | e6189174182a1bd871fec363a1f47ef4da157bcd | 055585422412d5989f582f3ba433b962e1206ad9 | refs/heads/master | 2021-01-18T08:02:08.938276 | 2014-12-18T16:06:45 | 2014-12-18T16:06:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.http import HttpResponse
from django.views.generic import View
from wechatpy.utils import check_signature
from wechatpy.exceptions import InvalidSignatureException
class Wechat(View):
def dispatch(self, *args, **kwargs):
signature = self.request.GET.get('signature', '')
timestamp = self.request.GET.get('timestamp', '')
nonce = self.request.GET.get('nonce', '')
try:
check_signature(settings.TOKEN, signature, timestamp, nonce)
except InvalidSignatureException:
return HttpResponse(status=403)
return super(Wechat, self).dispatch(*args, **kwargs)
def get(self, request):
echo_str = request.GET.get('echostr', '')
return HttpResponse(echo_str)
| UTF-8 | Python | false | false | 791 | py | 4 | views.py | 2 | 0.680152 | 0.676359 | 0 | 24 | 31.958333 | 72 |
raulds77/inference_project | 3,917,010,214,457 | c4cf18e6fcfa1934a34b593bea68174e3b224669 | 90ed9a87d46177db51f64b35bcd3d7912501ebb8 | /vbmfa/fa.py | 78229bc1f111e4d2c7d6cae1ccbfb11fc9413ca6 | []
| no_license | https://github.com/raulds77/inference_project | 5ac2a9accb87c81eb42af13e334ce6de670f1687 | 501d55bda477b2e8235e0182bdaeddf8d4ac0fad | refs/heads/master | 2021-08-30T13:07:53.192468 | 2017-12-18T04:02:14 | 2017-12-18T04:02:14 | 112,255,591 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Variational Bayesian Factor Analyser.
Implementation of a single factor analyser.
Model parameters are inferred by variational Bayes.
"""
import numpy as np
from scipy.special import digamma
class VbFa(object):
"""Variational Bayesian Factor Analyser
Takes a :math:`p \\times n` data matrix :math:`y` with :math:`n` samples
:math:`y_i` of dimension :math:`p`, and describes them as a linear
combination of :math:`q` latent factors:
.. math::
P(y_i|\Lambda, x_i, \Psi) = N(y_i|\Lambda x_i + \mu, \Psi)
:math:`\\Lambda` is the :math:`p \\times q` factor matrix, :math:`x_i` the
:math:`q` dimensional representation of :math:`y_i`, :math:`\\mu` the mean
vector, and :math:`\\Psi` the diagonal noise matrix.
Parameters
----------
y : :py:class:`numpy.ndarray`
Data matrix with samples in columns and features in rows
q : int
Dimension of low-dimensional space (# factors)
hyper : :py:class:`fa.Hyper`
Attributes
----------
Y : :py:class:`numpy.ndarray`
Data matrix with samples in columns and features in rows
P : int
Dimension of high-dimensional space
Q : int
Dimension of low-dimensional space (# factors)
N : int
# Samples
hyper : :py:class:`fa.Hyper`
Hyperparameters
q_nu : :py:class:`fa.Nu`
Nu distribution
q_mu : :py:class:`fa.Mu`
Mu distribution
q_lambda : :py:class:`fa.Lambda`
Lambda distribution
q_x : :py:class:`fa.X`
X distribution
Examples
--------
.. code:: python
fa = VbFa(data, q=2)
fa.fit()
print(fa.q_lambda.mean)
print(fa.q_x.mean)
"""
def __init__(self, y, q=None, hyper=None):
self.Y = y
self.P = self.Y.shape[0]
self.Q = self.P if q is None else q
self.N = self.Y.shape[1]
if hyper is None:
self.HYPER = Hyper(self.P, self.Q)
else:
self.HYPER = hyper
self.q_nu = Nu(self.Q)
self.q_mu = Mu(self.P)
self.q_lambda = Lambda(self.P, self.Q)
self.q_x = X(self.Q, self.N)
def fit(self, maxit=10, eps=0.0, verbose=False):
"""Fit model parameters by updating factors for several iterations
and return number of update iterations.
Parameters
----------
maxit : int
Maximum number of update iterations
eps : float
Stop if change in MSE is below eps
verbose : bool
Print statistics
Returns
-------
num_it : int
Number of iterations
"""
self.init()
i = 0
while i < maxit:
mse_old = self.mse()
self.update()
mse_new = self.mse()
delta = mse_old - mse_new
i += 1
if verbose:
print('{:d}: {:.3f}'.format(i, mse_new))
if delta < eps:
break
return i
def mse(self):
"""Compute mean squared error (MSE) between original data and
reconstructed data.
Returns
-------
mse : float
Mean squared error
"""
return np.linalg.norm(self.Y - self.x_to_y())
def x_to_y(self, x=None):
"""Reconstruct data from low-dimensional representation.
Parameters
----------
x : :py:class:`numpy.ndarray`
low-dimensional representation of the data
Returns
-------
y : :py:class:`numpy.ndarray`
High-dimensional representation
"""
if x is None:
x = self.q_x.mean
return self.q_lambda.mean.dot(x) + self.q_mu.mean[:, np.newaxis]
def q(self, name):
"""Return distribution q with the given name.
Parameters
----------
name : str
Name of the q distribution
"""
if name == 'nu':
return self.q_nu
elif name == 'lambda':
return self.q_lambda
elif name == 'x':
return self.q_x
elif name == 'mu':
return self.q_mu
else:
raise 'q_{:s} unknown!'.format(name)
def init(self):
"""Initialize factors for fitting."""
self.q_mu.mean = self.Y.mean(1)
def update_nu(self):
"""Update nu distribution."""
self.q_nu.update(self.HYPER, self.q_lambda)
def update_lambda(self, x_s=None):
"""Update lambda distribution.
Parameters
----------
x_s : :py:class:`numpy.ndarray`
sample weights
"""
self.q_lambda.update(self.HYPER, self.q_mu, self.q_nu, self.q_x,
self.Y, x_s=x_s)
def update_x(self):
"""Update x distribution."""
self.q_x.update(self.HYPER, self.q_lambda, self.q_mu, self.Y)
def update_mu(self, x_s=None):
"""Update mu distribution.
Parameters
----------
x_s : :py:class:`numpy.ndarray`
sample weights
"""
self.q_mu.update(self.HYPER, self.q_lambda, self.q_x, self.Y, x_s=x_s)
def update(self, names=['lambda', 'x', 'nu', 'mu'], **kwargs):
"""Update all distributions once in the given order.
Parameters
----------
names : list
Names of distribution to be updated
"""
if type(names) is str:
if names == 'nu':
self.update_nu()
elif names == 'lambda':
self.update_lambda(**kwargs)
elif names == 'mu':
self.update_mu(**kwargs)
elif names == 'x':
self.update_x()
else:
for name in names:
self.update(name, **kwargs)
def variance_explained(self, sort=False, norm=True):
"""Compute variance explained by factors.
Parameters
----------
sort : bool
Sort variance explained in descending order
norm : bool
Normalize variance explained to sum up to one
Returns
-------
        variance_explained : :py:class:`numpy.ndarray`
            Variance explained per factor
"""
ve = np.array([l.dot(l) for l in self.q_lambda.mean.T])
if sort:
ve = np.sort(ve)[::-1]
if norm:
ve /= ve.sum()
return ve
def factors_order(self):
"""Return order of factors by their fraction of variance explained."""
ve = self.variance_explained()
return ve.argsort()[::-1]
def permute(self, order):
"""Permute factors in the given order.
Parameters
----------
order : :py:class:`numpy.ndarray`
Permutation order
"""
self.q_lambda.permute(order)
self.q_nu.permute(order)
self.q_x.permute(order)
def order_factors(self):
"""Orders factors by the fraction of variance explained."""
self.permute(self.factors_order())
class Hyper(object):
"""Class for model hyperparameters.
Parameters
----------
p : int
Dimension of the high-dimensional space
q : int
Dimension of the low-dimensional space
Attributes
----------
P : int
Dimension of the high-dimensional space
Q : int
Dimension of the low-dimensional space
a : float
Alpha parameter of gamma prior over factor matrix
b : float
Beta parameter of gamma prior over factor matrix
mu : :py:class:`numpy.ndarray`
P dimensional mean vector of normal prior over mu vector
nu : :py:class:`numpy.ndarray`
P dimensional precision vector of diagonal mu covariance matrix
psi : :py:class:`numpy.ndarray`
P dimensional precision vector of diagonal noise covariance matrix
"""
def __init__(self, p, q=None):
self.P = p
self.Q = p if q is None else q
self.a = 1.0
self.b = 1.0
self.mu = np.zeros(self.P)
self.nu = np.ones(self.P)
self.psi = np.ones(self.P) * 10.0
def __str__(self):
s = '\na: {:f}, b: {:f}'.format(self.a, self.b)
s += '\nmu: {:s}'.format(self.mu.__str__())
s += '\nnu: {:s}'.format(self.nu.__str__())
s += '\npsi: {:s}'.format(self.psi.__str__())
return s
class Nu(object):
"""Nu factor class.
Dirichlet distribution over factor matrix.
Parameters
----------
q : int
Rank (# columns) of factor matrix
Attributes
----------
Q : int
Rank (# columns) of factor matrix
a : float
Alpha parameter of Dirichlet distribution
b : float
Beta parameter of Dirichlet distribution
"""
def __init__(self, q):
self.Q = q
self.init()
def init(self):
"""Initialize parameters."""
self.a = 1.0
self.b = np.ones(self.Q)
def update(self, hyper, q_lambda):
"""Update parameter.
Parameters
----------
hyper : :py:class:`fa.Hyper`
Hyperparameters
q_lambda : :py:class:`fa.Lambda`
Factor matrix
"""
self.a = hyper.a + 0.5 * hyper.P
self.b.fill(hyper.b)
self.b += 0.5 * (np.sum(q_lambda.mean**2, 0) + np.diag(np.sum(q_lambda.cov, 0)))
assert np.all(self.b > hyper.b)
def __str__(self):
return 'a: {:f}\nb: {:s}'.format(self.a, self.b.__str__())
def expectation(self):
"""Return expectation of Dirichlet distribution."""
return self.a / self.b
def permute(self, order):
"""Permute factors in the given order.
Parameters
----------
order : :py:class:`numpy.ndarray`
Permutation order
"""
self.b = self.b[order]
class Mu(object):
"""Mu factor class.
Normal distribution over mu with diagonal covariance matrix.
Parameters
----------
p : int
dimension of mu vector
Attributes
----------
P : int
dimension of mu vector
mean : :py:class:`np.ndarray`
mean of Normal distribution
cov : :py:class:`np.ndarray`
diagonal of covariance matrix
"""
def __init__(self, p):
self.P = p
self.init()
def init(self):
"""Initialize parameters."""
self.mean = np.random.normal(loc=0.0, scale=1e-3, size=self.P)
self.cov = np.ones(self.P)
def __str__(self):
return 'mean:\n{:s}\ncov:\n{:s}'.format(self.mean.__str__(), self.cov.__str__())
def update(self, hyper, q_lambda, q_x, y, x_s=None):
"""Update parameters.
Parameters
----------
hyper : :py:class:`fa.Hyper`
Hyperparameters
q_lambda : :py:class:`fa.Lambda`
Factor matrix
q_x : :py:class:`fa.X`
Factor loadings matrix
x_s : :py:class:`numpy.ndarray`
Sample weights
"""
if x_s is None:
x_s = np.ones(q_x.N)
# cov
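        # posterior precision = prior precision nu + noise precision psi times the effective sample count; invert for the covariance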
self.cov = hyper.nu + hyper.psi * np.sum(x_s)
self.cov = self.cov**-1
# mean
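        # cov-scaled sum of the psi-weighted data residual (y - Lambda x) and the nu-weighted prior mean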
self.mean = np.multiply(hyper.psi, (y - q_lambda.mean.dot(q_x.mean)).dot(x_s)) + np.multiply(hyper.mu, hyper.nu)
self.mean = np.multiply(self.cov, self.mean)
class Lambda(object):
"""Lambda factor matrix class.
Normal distributions over P rows of lambda matrix.
Parameters
----------
p : int
# Rows of lambda matrix
q : int
# Columns of lambda matrix
Attributes
----------
P : int
# Rows of lambda matrix
Q : int
# Columns of lambda matrix
mean : :py:class:`numpy.ndarray`
Mean of lambda matrix
cov : :py:class:`numpy.ndarray`
P QxQ covariance matrices for all rows
"""
def __init__(self, p, q):
self.P = p
self.Q = q
self.init()
def init(self):
"""Initialize parameters."""
self.mean = np.random.normal(loc=0.0, scale=1.0, size=self.P * self.Q).reshape(self.P, self.Q)
self.cov = np.empty((self.P, self.Q, self.Q))
for p in range(self.P):
self.cov[p] = np.eye(self.Q)
def __str__(self, cov=False):
s = 'mean:\n{:s}'.format(self.mean.__str__())
if cov:
for p in range(self.P):
s += '\ncov[{:d}]:\n{:s}'.format(p, self.cov[p].__str__())
return s
def update(self, hyper, q_mu, q_nu, q_x, y, x_s=None):
"""Update parameters.
Parameters
----------
hyper : :py:class:`fa.Hyper`
Hyperparameters
q_mu : :py:class:`fa.Mu`
Mu distribution
q_nu : :py:class:`fa.Nu`
Nu distribution
q_x : :py:class:`fa.X`
X distribution
y : :py:class:`numpy.ndarray`
Data matrix
x_s : :py:class:`numpy.ndarray`
Sample weights
"""
if x_s is None:
x_s = np.ones(q_x.N)
# cov
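        # per-row posterior covariance: ARD prior precision diag(E[nu]) plus the psi_p-weighted second moment of x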
assert np.all(q_nu.b > 0.0)
t = np.zeros((self.Q, self.Q))
for n in range(len(x_s)):
t += x_s[n] * (np.outer(q_x.mean[:, n], q_x.mean[:, n]) + q_x.cov)
tt = np.diag(q_nu.expectation())
self.cov = np.empty((self.P, self.Q, self.Q))
for p in range(self.P):
self.cov[p] = tt + hyper.psi[p] * t
self.cov[p] = np.linalg.inv(self.cov[p])
# mean
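        # each row p regresses the mu-centered, sample-weighted data onto the expected factors, scaled by psi_p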
self.mean = np.empty((self.P, self.Q))
for p in range(self.P):
w = np.multiply(x_s, y[:][p] - q_mu.mean[p])
self.mean[p] = hyper.psi[p] * self.cov[p].dot(q_x.mean.dot(w))
def permute(self, order):
"""Permute factors in the given order.
Parameters
----------
order : :py:class:`numpy.ndarray`
Permutation order
"""
self.mean = self.mean[:, order]
for p in range(self.P):
self.cov[p] = self.cov[p, order, :]
self.cov[p] = self.cov[p, :, order]
class X(object):
"""X factor class.
Normal distributions over N columns of X matrix.
Parameters
----------
q : int
# Rows of X matrix
n : int
# Columns (# samples) of X matrix
Attributes
----------
Q : int
# Rows of X matrix
N : int
# Columns (# samples) of X matrix
mean : :py:class:`numpy.ndarray`
QxN mean of X matrix
cov : :py:class:`numpy.ndarray`
QxQ covariance matrix shared for all N columns (samples)
"""
def __init__(self, q, n):
self.Q = q
self.N = n
self.init()
def init(self):
"""Initialize parameters."""
self.mean = np.random.normal(loc=0.0, scale=1.0, size=self.Q * self.N).reshape(self.Q, self.N)
self.cov = np.eye(self.Q)
def update(self, hyper, q_lambda, q_mu, y):
"""Update parameters.
Parameters
----------
hyper : :py:class:`fa.Hyper`
Hyperparameters
q_lambda : :py:class:`fa.Lambda`
Lambda distribution
q_mu : :py:class:`fa.Mu`
Mu distribution
y : :py:class:`numpy.ndarray`
Data matrix
"""
# cov
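        # shared across samples: identity prior plus Lambda' Psi Lambda, including the uncertainty in Lambda's rows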
self.cov = np.eye(self.Q) + np.multiply(q_lambda.mean.transpose(), hyper.psi).dot(q_lambda.mean)
for p in range(len(hyper.psi)):
self.cov += hyper.psi[p] * q_lambda.cov[p]
self.cov = np.linalg.inv(self.cov)
# mean
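        # projects the mu-centered data through Lambda' Psi into the latent space, then scales by cov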
self.mean = self.cov.dot(np.multiply(q_lambda.mean.transpose(), hyper.psi).dot(y - q_mu.mean[:, np.newaxis]))
def __str__(self):
return 'mean:\n{:s}\ncov:\n{:s}'.format(self.mean.transpose().__str__(), self.cov.__str__())
def permute(self, order):
""" Permute factors in the given order.
Parameters
----------
order : :py:class:`numpy.ndarray`
Permutation order
"""
self.mean = self.mean[order, :]
self.cov = self.cov[order, :]
self.cov = self.cov[:, order]
| UTF-8 | Python | false | false | 16,131 | py | 8 | fa.py | 2 | 0.516893 | 0.514165 | 0 | 584 | 26.621575 | 120 |
snowch/movie-recommender-demo | 94,489,281,831 | 4af7095f46f9e8fc06507c6a4bae4d0353d052b1 | cdeaf628ab6d7ba44620a52a8917170034874204 | /web_app/app/main/forms.py | bb58b4e0a3b5a70b53e8845d6bbe1f2019c57f60 | [
"Apache-2.0",
"CC-BY-SA-3.0"
]
| permissive | https://github.com/snowch/movie-recommender-demo | 4a3520b3c707a872d93bc2f887b89d128ab74284 | 8d945ae526ec9e6cf4d5db147eb8fed70a779187 | refs/heads/master | 2023-04-28T00:12:35.565741 | 2022-06-22T23:10:46 | 2022-06-22T23:10:46 | 78,864,895 | 92 | 55 | Apache-2.0 | false | 2023-04-17T20:14:18 | 2017-01-13T16:26:11 | 2023-04-06T19:54:18 | 2023-04-17T20:14:17 | 29,918 | 70 | 44 | 4 | Jupyter Notebook | false | false | from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
class SearchForm(Form):
search_string = StringField('Enter search string')
submit = SubmitField('Submit')
| UTF-8 | Python | false | false | 232 | py | 53 | forms.py | 17 | 0.780172 | 0.780172 | 0 | 7 | 32 | 54 |
jacobdadams/arcpy_scripts | 6,588,479,843,799 | e835649372c033a7476c14efb3a27519af083e38 | 45acdaaf208952c759902e3178138cbd1171194a | /public_notice.py | 2470131d242e39ca1e0a45c6a8260b83b094187a | [
"MIT"
]
| permissive | https://github.com/jacobdadams/arcpy_scripts | cb179e85d2cbc9eea548d320f34b9f0f2f86be54 | 309cc6f6c9679a9060b17d961b8c83bbd142e6a5 | refs/heads/master | 2020-04-01T22:05:24.917234 | 2018-09-11T23:09:53 | 2018-09-11T23:09:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #*****************************************************************************
#
# Project: Major Land Use Project Feature Creation GP Script Tool
# Purpose: Interface for planning staff to create major land use project
# features and automatically generate aerial map, vicinity map, and
# mailing list.
# Author: Jacob Adams, jacob.adams@cachecounty.org
#
#*****************************************************************************
# MIT License
#
# Copyright (c) 2018 Cache County
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#*****************************************************************************
import arcpy
import re
import os
import csv
import sys
import traceback
TIDs = arcpy.GetParameterAsText(0) # Multivalue paramter
project_type = arcpy.GetParameterAsText(1) # Specify values in script tool
project_name = arcpy.GetParameterAsText(2)
project_address = arcpy.GetParameterAsText(3)
request_summary = arcpy.GetParameterAsText(4)
meeting_date = arcpy.GetParameterAsText(5)
sr_link = arcpy.GetParameterAsText(6)
status = arcpy.GetParameterAsText(7) # Again, specify values
buffer_distance = arcpy.GetParameterAsText(8)
parcel_layer = arcpy.GetParameterAsText(9) # Layer of the mxd
projects_layer = arcpy.GetParameterAsText(10) # Layer of the mxd
# Parameters 11-13 used as output parameters below
mxd_file = arcpy.GetParameterAsText(14)
lua = arcpy.GetParameterAsText(15) # Again, specify values
solo_table = arcpy.GetParameterAsText(16) # Table in mxd
# Parameter 17 used as message/error parameter below
# Previous notes below left here for posterity's sake
# ---------------------------------------------------
# Set up paths to connection file for feature classes
# SDE connection file is placed in "path" folder that is in the same directory
# as the script. When shared as a GP service, "path" and the connection file
# will be uploaded to the server. the GP service will then use the connection
# file to access the SDE.
# script_dir = sys.path[0]
# folder = os.path.join(script_dir, "path")
# parcel_fc = os.path.join(folder, "path/to/fc")
# project_fc = os.path.join(folder, path/to/fc")
# Using references to the layers instead of direct links to the SDE presents the following problems:
# 1. If mapping stuff is commented out, geoprocessing fails to stage with a consolidating data error.
# 2. If mxd is refferred to using "CURRENT": error, broken link and not in data store
# 3. If mxd is reffered to using direct UNC path, it's not on the data store
# 4. If mxd is referred to using parameter, it's not in the data store.
# Can't verify, but my guess is that even if I could get the mxd stuff figured out, the layers would still bomb out at staging (#1)
# ------------------------------------------------------
# Set up variables
# MultiValue parameters come in as a single long string with each entry
# separated by a ";". split(";") creates a list by splitting on the ";"
TID = TIDs.split(";")
temp_fc = "in_memory\\temp_fc"
surrounding_parcels_fc = "in_memory\\surrounding_fc"
table_view = "assessor_table_view"
parcel_tid_field = "tax_id"
table_tid_field = "parcel_number"
address_fields_list = ["parcel_number", "owner_name", "owner_address1", "owner_city_state_zip"]
fields = {
"projecttype" : "TEXT",
"projectname" : "TEXT",
"projectaddress" : "TEXT",
"projectsummary" : "TEXT",
"nextmeeting" : "DATE",
"staffreport" : "TEXT",
"status" : "TEXT",
"parcelids" : "TEXT",
"active" : "TEXT",
"landuseauthority" : "TEXT"
}
field_list = ["projecttype", "projectname", "projectaddress",
"projectsummary", "nextmeeting", "staffreport", "status", "parcelids",
"active", "landuseauthority"]
verified_parcels = []
messages = []
try:
arcpy.AddMessage("Creating LU Project Polygon...")
messages.append("Creating LU Project Polygon...")
# Clear any selections and in_memory objects for safety
arcpy.SelectLayerByAttribute_management(parcel_layer, "CLEAR_SELECTION")
arcpy.Delete_management("in_memory")
# Regex pattern for parcel IDs
pattern = "[0-9]{2}-[0-9]{3}-[0-9]{4}"
# Make sure parcel numbers are valid
for tid in TID:
if tid and tid != "#":
# Make sure the parcel ID is formatted correctly
if not re.match(pattern, tid):
raise ValueError("Input Parcel IDs must be in the format " +
"YY-YYY-YYYY, where Y is a single digit number." +
" For example, 06-019-0009.")
# Make sure parcel ID is a valid parcel
where = parcel_tid_field + " = '" + tid + "'"
with arcpy.da.SearchCursor(parcel_layer, parcel_tid_field, where) as search_cursor:
if sum(1 for _ in search_cursor) < 1: #sums number of records
raise ValueError("Cannot find parcel ID " + tid + " in parcel " +
"list.")
    # Check for any characters in the project name that would cause havoc with the file system
file_pattern = r'[<>:"/\|?*]+'
if re.search(file_pattern, project_name):
raise ValueError("Please enter a different project name that does not contain the following characters: <>:\"/\\|?*")
# Wrap parcel id's in single quotes for where clauses
parcel_list = ["\'%s\'" %(p) for p in TID]
# Set definition query
if len(parcel_list) > 1:
tid_string = ", ".join(parcel_list)
elif len(parcel_list) == 1:
tid_string = parcel_list[0]
elif len(parcel_list) < 1:
raise ValueError("No parcels specified.")
dq = parcel_tid_field + " IN (" + tid_string + ")"
# Add all desired parcels to selection
arcpy.SelectLayerByAttribute_management(parcel_layer, "ADD_TO_SELECTION", dq)
# Dissolve parcels (if needed) into temporary feature class
arcpy.Dissolve_management(parcel_layer, temp_fc)
# Add fields to temporary feature class
for field, ftype in fields.iteritems():
if ftype is "TEXT":
arcpy.AddField_management(temp_fc, field, ftype, field_length=400)
else:
arcpy.AddField_management(temp_fc, field, ftype)
# Update fields with info from parameters to temporary feature class
with arcpy.da.UpdateCursor(temp_fc, field_list) as update_cursor:
for row in update_cursor:
row[0] = project_type
row[1] = project_name
row[2] = project_address
row[3] = request_summary
row[4] = meeting_date
row[5] = sr_link
row[6] = status
row[7] = ", ".join(TID)
row[8] = "Yes"
row[9] = lua
update_cursor.updateRow(row)
# Append merged parcel to Project FC
arcpy.Append_management(temp_fc, projects_layer, "NO_TEST")
arcpy.AddMessage("Creating mailing list...")
messages.append("Creating mailing list...")
# ============= Create public notice mailing lists =============
# Select nearby features (assumes parcel_layer selection is still valid)
selection = arcpy.SelectLayerByLocation_management(parcel_layer, overlap_type = "WITHIN_A_DISTANCE",
select_features = parcel_layer,
search_distance = buffer_distance,
selection_type = "NEW_SELECTION")
# Get nearby parcel IDs
nearby_parcels = []
with arcpy.da.SearchCursor(parcel_layer, parcel_tid_field) as parcel_cursor:
nearby_parcels = ["\'%s\'" %(r[0]) for r in parcel_cursor]
# Table definition query
if len(nearby_parcels) > 1:
table_tid_string = ", ".join(nearby_parcels)
elif len(nearby_parcels) == 1:
table_tid_string = nearby_parcels[0]
else:
table_tid_string = ""
table_where = "%s IN (%s)" %(table_tid_field, table_tid_string)
# Make table view with subsetted entries
arcpy.MakeTableView_management(solo_table, table_view, table_where)
# ========= Write out to csv ===========
arcpy.AddMessage("Creating CSV...")
messages.append("Creating CSV...")
# Create CSV of records from new feature class
csv_file = os.path.join(arcpy.env.scratchFolder, "Addresses.csv")
with open(csv_file, 'w') as csvfile:
csvfile.write("sep=|\n")
writer = csv.writer(csvfile, delimiter='|', lineterminator='\n')
with arcpy.da.SearchCursor(table_view, field_names=address_fields_list) as cursor:
writer.writerow(address_fields_list)
for row in cursor:
writer.writerow(row)
# Sends path of the csv file back to the service handler
arcpy.SetParameter(11, csv_file)
arcpy.AddMessage("Setting up mxd for mapping...")
messages.append("Setting up mxd for mapping...")
# ============= Create Overview and Aerial maps for staff Report =============
# Clear selection to avoid selection symbology in exported maps
arcpy.SelectLayerByAttribute_management(parcel_layer, "CLEAR_SELECTION")
# Get the map document, data frame, and layers
arcpy.AddMessage("MXD Path: " + mxd_file)
mxd = arcpy.mapping.MapDocument(mxd_file)
df = arcpy.mapping.ListDataFrames(mxd)[0]
layers = arcpy.mapping.ListLayers(mxd)
for l in layers:
if l.name == "Aerial Parcels":
a_layer = l
elif l.name == "Vicinity Parcels":
v_layer = l
elif l.name == "Imagery":
i_layer = l
# Uses definition query created earlier
a_layer.definitionQuery = dq
v_layer.definitionQuery = dq
arcpy.AddMessage("Creating vicinity map...")
messages.append("Creating vicinity map...")
# Vicinity Map: turn on vicinity parcels, turn off imagery, zoom to layer, add 10k to extent, export to jpg @ 600dpi
v_layer.visible = True
a_layer.visible = True
i_layer.visible = False
df.extent = v_layer.getExtent() # Set extent to match layers
df.scale = df.scale + 10000 # Add 10k to scale to give us the vicinity view
out_path_v = os.path.join(arcpy.env.scratchFolder, project_name + " Vicinity.jpg")
arcpy.mapping.ExportToJPEG(mxd, out_path_v, resolution=600)
arcpy.SetParameter(12, out_path_v)
arcpy.AddMessage("Creating aerial map...")
messages.append("Creating aerial map...")
# Aerial Map: turn off vicinity parcels, turn on imagery, zoom to layer, export
v_layer.visible = False
a_layer.visible = True
i_layer.visible = True
df.extent = v_layer.getExtent() # Set extent to match layers
df.scale += 200 # Add 200 to scale to give a little bit of space at the edges
# # Use Logan's image service for imagery...
# server_url = "http://gis.loganutah.org/arcgis/services/Ortho/Ortho2016_Cache/ImageServer"
# layer_name = "in_memory\\imagery_layer"
#
# # Calculate new extent for imagery
# # New extent is delta_x map units wider, where delta_x = map distance * new scale - original width
# # To center new extent, add/subtract by delta_x by 2 (and similar for delta_y and height)
# x_md = df.elementWidth / mxd.pageSize.width # Map distance in feet is df width / mxd width (both in inches)
# y_md = df.elementHeight / mxd.pageSize.height
# delta_x = x_md * df.scale - a_layer.getExtent().width
# delta_y = y_md * df.scale - a_layer.getExtent().height
#
# xmin = a_layer.getExtent().XMin - (delta_x / 2.0)
# xmax = a_layer.getExtent().XMax + (delta_x / 2.0)
# ymin = a_layer.getExtent().YMin - (delta_y / 2.0)
# ymax = a_layer.getExtent().YMax + (delta_y / 2.0)
# ex = arcpy.Extent(xmin, ymin, xmax, ymax)
#
# arcpy.MakeImageServerLayer_management(server_url, layer_name, ex)
# image_layer = arcpy.mapping.Layer(layer_name)
# arcpy.mapping.InsertLayer(df, i_layer, image_layer)
# image_layer.visible = True
out_path_a = os.path.join(arcpy.env.scratchFolder, project_name + " Aerial.jpg")
arcpy.mapping.ExportToJPEG(mxd, out_path_a, resolution=600)
arcpy.SetParameter(13, out_path_a)
del mxd
except ValueError as ve:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "\nArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
# Log the errors as warnings on the server (adding as errors would cause the task to fail)
arcpy.AddWarning(pymsg)
arcpy.AddWarning(msgs)
# Tell the user to use a valid Parcel ID
messages.append(" --- ERROR: ")
messages.append(ve.args[0])
except Exception as ex:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "\nArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
# Log the errors as warnings on the server (adding as errors would cause the task to fail)
arcpy.AddWarning(pymsg)
arcpy.AddWarning(msgs)
messages.append(ex.args[0])
# Sometimes the database state changes while adding the polygon (someone
# saves edits, etc). The GP service doesn't handle that all that well
# (without going through the hassle of an edit session). This manifests as
# the script failing once, and then future attempts seem to succeed but
# don't add polygons. The solution is to restart the service and add them
# again.
if "version has been redefined" in ex.args[0]:
messages.append("\n")
messages.append(" --- Error adding project polygon. Please ask GIS to restart the Geoprocessing Service before trying again. --- ")
messages.append("\n")
# Sometimes the call to add the imagery from Logan City times out. The
# polygons get created, but the map fails. The solution is to run the tool
# again to add tempory projects that have the same boundaries, thus
# creating the maps, and then delete the temporary projects.
elif "Failed to get raster" in ex.args[0]:
messages.append(" --- Error creating aerial overview map. Please create a new, temporary polygon to recreate the maps and mailing list, and then delete the temporary polygon. --- ")
finally:
output_string = "\n".join(messages)
arcpy.SetParameterAsText(17, output_string)
# Be a good citizen and delete the in_memory workspace
arcpy.Delete_management("in_memory")
| UTF-8 | Python | false | false | 15,797 | py | 10 | public_notice.py | 8 | 0.647022 | 0.638539 | 0 | 356 | 43.373596 | 189 |
momongaclub/crowdsourcing_site | 8,761,733,324,724 | 6ee951afedd41c82986bb3bd0bbf19c6c716374b | 9a8dbf8069a86218307451990187a80aad3bafce | /flask/database/samples/check_table.py | 516066f3222e4a38e5cdec28471f3ea6361c6e6a | []
| no_license | https://github.com/momongaclub/crowdsourcing_site | 41c0bd67ed07f566d58d1887966f12bbe2562260 | 458b90cc1a98ab2a50ae71012d1fa7f3d6e9100d | refs/heads/master | 2022-12-29T19:51:19.708078 | 2020-10-14T04:44:05 | 2020-10-14T04:44:05 | 300,500,975 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
dbname = './RAP_CORPUS.db'
conn = sqlite3.connect(dbname)
cur = conn.cursor()
# write the SQL statement in execute() exactly as you would run it in a terminal
select_sql = 'SELECT * FROM corpus'
for row in cur.execute(select_sql):
print(row)
# use fetchall() to fetch all the contents at once, then print them
cur.close()
conn.close()
| UTF-8 | Python | false | false | 345 | py | 18 | check_table.py | 11 | 0.729537 | 0.72242 | 0 | 14 | 19.071429 | 37 |
plone/plone.protect | 16,329,465,673,180 | 3ae1d7a1ddec9167168f247e511ba6df7ecd1ab8 | 80aca887f7cf2e7e44779b3162250e7c34622198 | /plone/protect/tests/testPostOnly.py | 7654fb1509f71921c968f6669d7758325a2bd8f4 | []
| no_license | https://github.com/plone/plone.protect | 923bce28dd10d5dc1a0ff4b4188d5f139151c26e | c0f18c099895143620a5e19a27a797b4097f329b | refs/heads/master | 2023-08-03T09:43:36.198938 | 2023-07-04T06:06:33 | 2023-07-04T06:06:33 | 2,432,541 | 6 | 7 | null | false | 2023-07-04T06:06:35 | 2011-09-21T20:17:50 | 2023-04-09T16:25:05 | 2023-07-04T06:06:34 | 364 | 7 | 8 | 12 | Python | false | false | from plone.protect.postonly import check
from unittest import makeSuite
from unittest import TestCase
from unittest import TestSuite
from zExceptions import Forbidden
from ZPublisher.HTTPRequest import HTTPRequest
class PostOnlyTests(TestCase):
def makeRequest(self, method):
return HTTPRequest(
None,
dict(REQUEST_METHOD=method, SERVER_PORT="80", SERVER_NAME="localhost"),
None,
)
def testNonHTTPRequestAllowed(self):
check("not a request")
def testGETRequestForbidden(self):
self.assertRaises(Forbidden, check, self.makeRequest("GET"))
def testPOSTRequestAllowed(self):
check(self.makeRequest("POST"))
def test_suite():
suite = TestSuite()
suite.addTest(makeSuite(PostOnlyTests))
return suite
| UTF-8 | Python | false | false | 806 | py | 24 | testPostOnly.py | 18 | 0.702233 | 0.699752 | 0 | 30 | 25.866667 | 83 |
Junnian/patend | 15,315,853,394,279 | a639d3cc7edb738156a681ad7e80ac05e5f4cded | e93e9ff56ab53365bf6300e69eb04cde3198ddae | /getpatent/getpatent/spiders/patent.py | a2c981ef73a67f9b0c0f9c38367f0eca7476fc66 | []
| no_license | https://github.com/Junnian/patend | d8d1d9ef038e64ac7d57d9757f6e61b667747c9f | 41da8d15428af23c59db63c161bf780dbda61958 | refs/heads/master | 2021-08-31T16:31:50.257941 | 2017-12-22T02:58:53 | 2017-12-22T02:58:53 | 112,728,176 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from getpatent.items import GetpatentItem
from scrapy.http import Request
from scrapy.selector import Selector
import time
from random import random
from getpatent.settings import FIELD,key1s,key2s
from getpatent.spiders.parse_info import get_info,write_relpatents
'''
Page 5 of results for "aerospace":
https://www.google.com.hk/search?q=aerospace&tbm=pts&start=40
For "Biomedical engineering", spaces do not need to be replaced with underscores
'''
Url = 'https://www.google.com'
import os,sys
print "__file__=%s" % __file__
outfilename = 'relpatents.txt'
class ScholarSpider(scrapy.Spider):
name = 'patent'
allowed_domains = ['www.google.ca','www.google.com']
start_urls = []
for key in key1s:
file_ = 'new/'+key+'.txt'
with open(file_,'r') as f:
list_ = f.readlines()
for i in list_:
start_urls.append(i)
if ' ' in i:
i=i.replace(' ','_')
                start_urls.append(i)  # keep both the underscored and non-underscored variants
for key in key2s:
file_ = 'key/'+key+'.txt'
with open(file_,'r') as f:
list_ = f.readlines()
for i in list_:
start_urls.append(i)
if ' ' in i:
i=i.replace(' ','_')
                start_urls.append(i)  # keep both the underscored and non-underscored variants
    scrawl_ID = set(start_urls)  # fields still to crawl
    finish_ID = set()  # fields already crawled
    peopleUrl = set()  # homepages already crawled
    # the next field is started only after the previous one finishes
def start_requests(self):
        # runs only once
if self.scrawl_ID.__len__():
print self.scrawl_ID.__len__()
field = self.scrawl_ID.pop()
            self.finish_ID.add(field)  # add to the crawled set
# field = 'aerospace'
# https://www.google.com.hk/search?q=aerospace&tbm=pts&start=40
url = 'https://www.google.com/search?q='+field+'&tbm=pts&start=00'+'&sa=N'
# yield Request(url=url, callback=self.parse1)
req = Request(url= url,callback = self.parse1)
yield req
def parse1 (self, response):
        # This parse callback handles the first results page of each field,
        # using a Selector to extract the patent detail-page URLs
sel = Selector(response)
detailurls = sel.xpath('//*[@class="r"]//a[contains(@href,"/patents/")]/@href').extract()
reqs = []
for url in detailurls:
yield Request(url = url,callback = self.parse_info,dont_filter=True)
# yield Request(url = url,callback = parse_info,dont_filter=True)
nexturl = sel.xpath('//*[text()="Next"]/parent::*/@href').extract()
nexturl2 = sel.xpath('//*[text()="Next"]/@href').extract()
nexturl_1= response.xpath('//*[@class="b"]/a/@href').extract()
nexturl_2 = response.xpath('//*[@class="b navend"]/a/@href').extract()
print('---------------nexturl--------------nexturl',nexturl)
print('---------------nexturl2--------------nnexturl2',nexturl2)
print('-----------nexturl_1----------------nexturl_1',nexturl_1)
print('-----------nexturl_2----------------nexturl_2',nexturl_2)
        # start building the next-page link
url = 0
if len(nexturl)==1:
url = Url+nexturl[0]
# yield Request(url = Url+url,callback = self.parse1,dont_filter=True)
elif len(nexturl2)==1:
print('-------------222---------------nexturl2',nexturl2)
if nexturl2:
url = Url+nexturl2[0]
# yield Request(url = Url+url,callback = self.parse1,dont_filter=True)
elif len(nexturl_2)==1:
print('-----------5555----------------nexturl_2',nexturl_2)
url = Url+nexturl_2[0]
        if url==0:  # none of the checks above extracted a URL, so build one by hand
print('-----------8----------------')
nowurl = response.url
            # compute the next page number
a = nowurl.split('start=')
b = a[-1].split('&')
c = b[0]
N = int(c)
N = N+10
            # extract the current keyword
a = nowurl.split('q=')
b = a[1].split('&')
key = b[0]
url = 'https://www.google.com.hk/search?q='+key+'&tbm=pts&start='+str(N)+'&sa=N'
filename = response.url+'.txt'
            with open(filename,'a+') as f:
f.write('1111')
f.write(response.body)
yield Request(url = url,callback = self.parse2,dont_filter=True)
def parse2(self,response):
        # extract the patent detail-page URLs
sel = Selector(response)
detailurls = sel.xpath('//*[@class="r"]//a[contains(@href,"/patents/")]/@href').extract()
reqs = []
for url in detailurls:
yield Request(url = url,callback = self.parse_info,dont_filter=True)
# yield Request(url = url,callback = parse_info,dont_filter=True)
nexturl = sel.xpath('//*[text()="Next"]/parent::*/@href').extract()
nexturl2 = sel.xpath('//*[text()="Next"]/@href').extract()
nexturl_1= response.xpath('//*[@class="b"]/a/@href').extract()
nexturl_2 = response.xpath('//*[@class="b navend"]/a/@href').extract()
print('---------------nexturl--------------nexturl',nexturl)
print('---------------nexturl2--------------nnexturl2',nexturl2)
print('-----------nexturl_1----------------nexturl_1',nexturl_1)
print('-----------nexturl_2----------------nexturl_2',nexturl_2)
        # start building the next-page link
url = 0
if len(nexturl)==1:
url = Url+nexturl[0]
yield Request(url = url,callback = self.parse2,dont_filter=True)
elif len(nexturl_1)==2:
print('-----------3----------------nexturl_1',nexturl_1)
url = Url+nexturl_1[1]
yield Request(url = url,callback = self.parse2,dont_filter=True)
elif len(nexturl_2)==2:
print('-----------4444----------------nexturl_2',nexturl_2)
url = nexturl_2[1]
yield Request(url = Url+url,callback = self.parse2,dont_filter=True)
        elif len(nexturl_2)==0 and len(nexturl_1)==0 :  # none of the checks above extracted a URL, so build one by hand
print('--q---------8----------------')
nowurl = response.url
            # compute the next page number
a = nowurl.split('start=')
b = a[-1].split('&')
c = b[0]
N = int(c)
N = N+10
            # extract the current keyword
a = nowurl.split('q=')
b = a[1].split('&')
key = b[0]
url = 'https://www.google.com.hk/search?q='+key+'&tbm=pts&start='+str(N)+'&sa=N'
filename = response.url+'.txt'
            with open(filename,'a+') as f:
f.write('1111')
f.write(response.body)
yield Request(url = url,callback = self.parse2,dont_filter=True)
        # check whether the current page is the last one
        if len(detailurls)==0:  # good enough as long as it runs
            if self.scrawl_ID.__len__():  # if any fields remain to crawl, continue with the next one
field = self.scrawl_ID.pop()
                self.finish_ID.add(field)  # add to the crawled set
# field = 'aerospace'
# https://www.google.com.hk/search?q=aerospace&tbm=pts&start=40
url = 'https://www.google.com/search?q='+field+'&tbm=pts&start=00'+'&sa=N'
# yield Request(url=url, callback=self.parse1)
req = Request(url= url,callback = self.parse1)
with open('next.txt','a+') as f:
f.write(field)
f.write('\n')
yield req
def parse_info(self,response):
item = GetpatentItem()
item['Url'] = response.url
sel = Selector(response)
information = get_info(sel)
item['Title'] = information['Title']
item['Abstract'] =information['Abstract']
item['Publication_number'] = information['Publication_number']
item['Publication_type'] = information['Publication_type']
item['Publication_date'] =information['Publication_date']
item['Original_Assignee'] = information['Original_Assignee']
item['Filing_date'] = information['Filing_date']
item['Application_number'] = information['Application_number']
item['Priority_date'] = information['Priority_date']
item["Inventors"] = information["Inventors"]
item['Applicant'] = information['Applicant']
item["Export_Citation"] = information["Export_Citation"]
item["Also_published_as"] = information["Also_published_as"]
item['External_Links'] = information['External_Links']
item['Cited_patent'] = information['Cited_patent']
item['Referenced_by'] = information['Referenced_by']
item['Classification'] = information['Classification']
write_relpatents(outfilename,information['rel_patents'])
yield item | UTF-8 | Python | false | false | 9,286 | py | 7 | patent.py | 6 | 0.520205 | 0.506164 | 0 | 220 | 38.822727 | 97 |
XGBTrain5/01.other | 5,849,745,487,530 | d278a481af12a5d4ffcbdb2c9e3a1edc34132ecd | c4776d74c289e971b5424bcb7fc1d13aecd29f1b | /20-10-21/oop_with_python-master/10-super/40-super-1.py | 4eeea91bb8c6e469317ad831d8c4ae97fcc8675e | [
"MIT"
]
| permissive | https://github.com/XGBTrain5/01.other | 147d7cdb332b5ffde1f6b067e44737cc75e905ca | 5a9170108e1546549ed5c31935c351d8726fac81 | refs/heads/master | 2023-08-27T23:07:37.474892 | 2021-11-08T13:55:46 | 2021-11-08T13:55:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# 40-super-1.py
# This is an example of how super() works
# in inheritance.
# For more step-by-step details, refer to:
# https://arvimal.wordpress.com/2016/07/01/inheritance-and-super-object-oriented-programming/
class MyClass(object):
def func(self):
print("I'm being called from the Parent class")
class ChildClass(MyClass):
def func(self):
print("I'm actually being called from the Child class")
print("But...")
# Calling the `func()` method from the Parent class.
super(ChildClass, self).func()
my_instance_2 = ChildClass()
my_instance_2.func()
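# expected output:
#   I'm actually being called from the Child class
#   But...
#   I'm being called from the Parent class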
| UTF-8 | Python | false | false | 626 | py | 49 | 40-super-1.py | 41 | 0.669329 | 0.648562 | 0 | 26 | 23.076923 | 93 |
Aasfga/matroids-library | 7,541,962,578,378 | fcd3dbd226e9e86bf184bdf64842df488cc9a6f9 | 5a7e80463921cab5bb87c3002ae6f97cd8711f51 | /tests/test_uniform_matroid.py | d28e40fbc230ffcbda6bc1188603c65490571185 | [
"MIT"
]
| permissive | https://github.com/Aasfga/matroids-library | 2dcbddde3028c692ff500ca652dcba81e90d3bee | 468f6fdc4b0c0e93346dba7365fae0fc6993f9cf | refs/heads/master | 2020-07-02T21:25:38.257540 | 2019-09-07T18:37:13 | 2019-09-07T18:37:13 | 201,670,089 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from unittest import TestCase
from uniform_matroid import UniformMatroid
from itertools import combinations
from numpy import array_equal
class TestUniformMatroid(TestCase):
def setUp(self) -> None:
self.size = 10
self.universe = set(range(self.size))
self.rank = 5
self.matroid = UniformMatroid(self.universe, self.rank)
def tearDown(self) -> None:
self.universe = None
self.rank = None
self.matroid = None
def test_rank(self):
self.assertEqual(self.matroid.rank, self.rank)
def test_size(self):
self.assertEqual(self.matroid.size, len(self.universe))
def test_matrix(self):
matrix = self.matroid.matrix
field = self.matroid.field
self.assertEqual(matrix.shape, (self.rank, self.size), "Matrix has wrong shape")
self.assertGreater(field.characteristic(), self.matroid.size, "Field isn't bigger than universe")
unique_row = matrix[1]
self.assertEqual(len(set(unique_row)), len(unique_row), "Unique field elements are not unique")
self.assertFalse(np.any(matrix == field.zero), "Zero is in matrix")
matrix[0][0] = field.zero
new_matrix = self.matroid.matrix
self.assertFalse(array_equal(matrix, new_matrix), "Matrix wasn't copied")
def test_is_independent(self):
s = set(next(combinations(self.universe, self.rank)))
self.assertTrue(self.matroid.is_independent(s), "Independent set is dependent")
s = set(next(combinations(self.universe, self.rank + 1)))
self.assertFalse(self.matroid.is_independent(s), "Dependent set is independent")
s = {'a', 'b', 'c', 'd'}
with self.assertRaises(ValueError):
self.matroid.is_independent(s)
def test_mapping(self):
mapping = self.matroid.mapping
keys_checker = list(range(self.size))
keys = list(mapping.keys())
self.assertEqual(keys, keys_checker)
values_checker = list(self.universe)
values = list(mapping.values())
self.assertEqual(values, values_checker)
| UTF-8 | Python | false | false | 2,118 | py | 21 | test_uniform_matroid.py | 19 | 0.654863 | 0.651558 | 0 | 54 | 38.222222 | 105 |
wiltonvgc/projeto2 | 10,307,921,511,795 | cdd2991d8f74958712769377446f87dd3e1f8caa | 78d08e4608062b15abdb6f2682f88336e4eb6373 | /lab2/lab2.py | 287c0a8a722d9217530f551c220314621bdc2b6e | []
| no_license | https://github.com/wiltonvgc/projeto2 | 5292c5622cd226b0bbf5c66586b141b1fc22b599 | d1cfbd28e39144aa20bfd7b1e93d58a5be066ad5 | refs/heads/master | 2021-01-19T13:47:03.315667 | 2017-02-21T01:48:54 | 2017-02-21T01:48:54 | 82,418,820 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.log import lg, info, setLogLevel
from mininet.util import dumpNodeConnections, quietRun, moveIntf
from mininet.cli import CLI
from mininet.node import Switch, OVSKernelSwitch
from subprocess import Popen, PIPE, check_output
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
import termcolor as T
import time
class Topologia(Topo):
def __init__(self):
        super(Topologia, self).__init__()
        # create the hosts
pc11 = self.addNode('pc11',ip='192.168.1.1')#host pc1.1
pc12 = self.addNode('pc12',ip='192.168.1.2')#host pc1.2
pc21 = self.addNode('pc21',ip='192.168.2.1')#host pc2.1
pc22 = self.addNode('pc22',ip='192.168.2.2')#host pc2.2
pc31 = self.addNode('pc31',ip='192.168.3.1')#host pc3.1
pc32 = self.addNode('pc32',ip='192.168.3.2')#host pc3.2
pc81 = self.addNode('pc81',ip='192.168.8.1')#host pc8.1
pc82 = self.addNode('pc82',ip='192.168.8.2')#host pc8.2
pc91 = self.addNode('pc91',ip='192.168.9.1')#host pc9.1
pc92 = self.addNode('pc92',ip='192.168.9.2')#host pc9.2
        # create the switches
sL1 = self.addSwitch('sL1')#switch LAN1
sL2 = self.addSwitch('sL2')#switch LAN2
sL3 = self.addSwitch('sL3')#switch LAN3
sL8 = self.addSwitch('sL8')#switch LAN8
sL9 = self.addSwitch('sL9')#switch LAN9
        # create routers A and B
r1 = self.addSwitch('r1',type='Router')#roteador rA
r2 = self.addSwitch('r2',type='Router')#roteador rB
        # create the links
self.addLink(pc11,sL1)
self.addLink(pc12,sL1)
self.addLink(pc21,sL2)
self.addLink(pc22,sL2)
self.addLink(pc31,sL3)
self.addLink(pc32,sL3)
self.addLink(pc81,sL8)
self.addLink(pc82,sL8)
self.addLink(pc91,sL9)
self.addLink(pc92,sL9)
self.addLink(sL1,r1)
self.addLink(sL2,r1)
self.addLink(sL3,r1)
self.addLink(sL8,r2)
self.addLink(sL9,r2)
self.addLink(r1,r2)
return
def getIP(host):
    # IP address for a PC host
if('pc' in host):
ip = '192.168.'
ip = ip + host[2] + '.' + host[3] + '/24'
return ip
def getGateway(host):
    # gateway address for a PC host
if('pc' in host):
ip = '192.168.' + host[2] + '.254/24'
return ip
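# e.g. getIP('pc11') -> '192.168.1.1/24'; getGateway('pc11') -> '192.168.1.254/24'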
def main():
os.system("rm -f /tmp/r*.log /tmp/r*.pid logs/*")
os.system("mn -c >/dev/null 2>&1")
os.system("killall -9 zebra > /dev/null 2>&1")
os.system('pgrep -f webserver.py | xargs kill -9')
net = Mininet(topo=Topologia())
net.start()
    # set up routers rA and rB (enable IP forwarding)
for router in net.switches:
if(router.name=='r1' or router.name=='r2'):
router.cmd("sysctl -w net.ipv4.ip_forward=1")
router.waitOutput()
    # configure the interfaces of routers rA and rB with zebra
for router in net.switches:
if(router.name=='r1' or router.name=='r2'):
router.cmd("/usr/lib/quagga/zebra -f conf/zebra-%s.conf -d -i /tmp/zebra-%s.pid > logs/%s-zebra-stdout 2>&1" % (router.name, router.name, router.name))
router.waitOutput()
#router.cmd("/usr/lib/quagga/bgpd -f conf/bgpd-%s.conf -d -i /tmp/bgp-%s.pid > logs/%s-bgpd-stdout 2>&1" % (router.name, router.name, router.name), shell=True)
#router.waitOutput()
CLI(net)
net.stop()
os.system("killall -9 zebra")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,275 | py | 4 | lab2.py | 3 | 0.66229 | 0.589008 | 0 | 114 | 27.72807 | 161 |
bird-house/pyramid-phoenix | 4,166,118,313,192 | 4aee12ba4d3d3c653b2bbc2505538508264674ce | 05973b7cf07eb03099e516f21f06a6e295086862 | /phoenix/services/views/actions.py | e19208a54286e58b04c467472e82f2aaefb339f5 | [
"Apache-2.0"
]
| permissive | https://github.com/bird-house/pyramid-phoenix | 302a0a4339dfea0318cf4b2545f4005b940155b0 | 36737f95f0991661856f182ba35b850829ca401c | refs/heads/master | 2023-09-01T14:00:20.087987 | 2023-05-11T16:56:14 | 2023-05-11T16:56:14 | 23,655,239 | 7 | 18 | Apache-2.0 | false | 2023-05-04T12:20:44 | 2014-09-04T08:52:32 | 2022-06-13T10:27:35 | 2023-05-04T12:20:43 | 23,501 | 7 | 10 | 49 | JavaScript | false | false | from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound
import logging
LOGGER = logging.getLogger("PHOENIX")
@view_defaults(permission='submit')
class ServiceActions(object):
"""Actions related to service registration."""
def __init__(self, context, request):
self.context = context
self.request = request
self.session = self.request.session
@view_config(route_name='remove_service')
def remove_service(self):
try:
service_id = self.request.matchdict.get('service_id')
self.request.catalog.delete_record(service_id)
self.session.flash('Removed Service.', queue="info")
except Exception:
self.session.flash("Could not remove service.", queue="danger")
return HTTPFound(location=self.request.route_path('services'))
@view_config(route_name='clear_services')
def clear_services(self):
try:
self.request.catalog.clear_services()
self.session.flash('All Service removed.', queue="info")
except Exception:
self.session.flash("Could not remove services.", queue="danger")
return HTTPFound(location=self.request.route_path('services'))
def includeme(config):
""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""
config.add_route('clear_services', '/clear_services')
config.add_route('remove_service', '/services/{service_id}/remove')
| UTF-8 | Python | false | false | 1,537 | py | 160 | actions.py | 76 | 0.664932 | 0.664932 | 0 | 45 | 33.155556 | 76 |
codimite/gostep | 2,688,649,530,659 | 0a68d8bbd9cde60af9d836c966fb6de42ad87504 | b6da76f403fd987954c2c55b1e709b229137adb4 | /gostep/consts.py | 31985df232f49ed33184fabe6c7c28158bcf8df2 | [
"LicenseRef-scancode-other-permissive",
"MIT"
]
| permissive | https://github.com/codimite/gostep | 9fe278a45e8c9886adcdbaf03aa90ee61686083c | 8200eb6887f00761e9d37f62e4c31ea9c548d22e | refs/heads/master | 2022-11-29T15:34:36.354806 | 2020-08-10T16:52:06 | 2020-08-10T16:52:06 | 267,124,960 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | GOSTEP_VERSION = 'v0.1.0beta'
GCLOUD_STORAGE_CLASS = 'STANDARD'
FUNCTIONS_API = 'cloudfunctions'
FUNCTIONS_API_VERSION = 'v1'
SERVICE_ENTRY_POINT = 'main'
ENVIRONMENTS = [
'python',
'nodejs',
'java/plain',
'java/spring'
]
TEMPLATE_REPO = 'https://github.com/codimite/gostep-templates/trunk'
BASE_CONFIG_FILE = 'config.json'
AUTH_FILE = 'credentials.json'
SERVICE_CONFIG_FILE = 'function.json'
TEMPLATE_DIRECTORY = 'templates'
BUILD_DIR = '../build'
GOSTEP_IGNORE_FILE = '.gostepignore'
GOSTEP_BUCKET = 'gostep'
SERVICES = 'services'
TEMPLATES = 'templates'
NAME = 'name'
ALLOW_ALL = 'allow_all'
RUNTIME = 'runtime'
JAVA_RUNTIME = 'java'
DESCRIPTION = 'description'
ENVIRONMENT = 'env'
VERSION = 'version'
SOURCE_DIRECTORY = 'source_dir'
SOURCE_ARCHIVE = 'source_archive'
LOCATION_NAME = 'location_name'
LOCATION_ID = 'location_id'
DEFAULT_LOCATION = 'default_location'
KIND = 'kind'
PROJECT_ID = 'project_id'
SERVICE_ACCOUNT_EMAIL = 'service_account_email'
CHECKSUM = 'checksum'
TRIGGER = 'trigger'
HTTP = 'http'
HTTPS_TRIGGER_KEY = 'httpsTrigger'
EVENT_TRIGGER_KEY = 'eventTrigger'
EVENT_TYPE = 'eventType'
EVENT_TYPE_PUBSUB = 'cloud.pubsub'
EVENT_TYPE_STORAGE = 'cloud.storage'
RESOURCE = 'resource'
VALIDATION_MESSAGES = 'msgs'
REQUIRED_FIELDS = 'required'
TYPE = 'type'
TEXT = 'text'
BOOLEAN = 'boolean'
BRANCH = 'branch'
LEAF = 'leaf'
TRIGGERS = ['http', 'pubsub', 'storage']
COMMANDS = [
'auth',
'init',
'inside',
'projects',
'location',
'locations',
'display-name',
'show',
'base',
'location',
'env',
'service',
'deploy',
'gcloud',
'trigger',
'allow-all',
'version'
]
CMD_BRANCHES = [
'auth',
'base',
'deploy',
'gcloud',
'service'
]
CMD_TREE = {
'auth': {
TYPE: BOOLEAN,
BRANCH: True,
LEAF: False,
'init': {
BRANCH: False,
LEAF: True,
TYPE: TEXT,
REQUIRED_FIELDS: [],
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:'
' gostep auth init <project name>',
' Optional args:\n'
' display-name <service account display name>\n'
' inside <workspace directory>'
]
},
'inside': {
TYPE: TEXT,
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep auth inside <workspace directory>'
]
},
'show': {
BRANCH: False,
LEAF: True,
TYPE: BOOLEAN
},
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep auth init <project name>',
' Optional args:\n'
' display-name <service account display name>\n'
' inside <workspace directory>',
' gostep auth show',
' Optional args:\n'
' inside <workspace directory>'
]
},
'base': {
BRANCH: True,
LEAF: False,
TYPE: BOOLEAN,
'init': {
BRANCH: False,
LEAF: True,
TYPE: TEXT,
REQUIRED_FIELDS: [],
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:'
' gostep base init <project name>',
' Optional args:\n'
' explains <project info>\n'
' inside <workspace directory>\n'
' location <gcloud region id>\n'
' version <project version>'
]
},
'show': {
BRANCH: False,
LEAF: True,
TYPE: BOOLEAN
},
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep base init <project name>',
' Optional args:\n'
' explains <project info>\n'
' inside <workspace directory>\n'
' location <gcloud region id>\n'
' version <project version>'
' gostep base show',
' Optional args:\n'
' inside <workspace directory>'
]
},
'deploy': {
TYPE: TEXT,
BRANCH: False,
LEAF: True,
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep deploy diff',
' Optional args:\n'
' inside <workspace directory>'
' gostep deploy <service name>',
' Optional args:\n'
' inside <workspace directory>'
]
},
'gcloud': {
BRANCH: True,
LEAF: False,
TYPE: BOOLEAN,
REQUIRED_FIELDS: [
{
'projects': {
TYPE: BOOLEAN
}
},
{
'locations': {
TYPE: BOOLEAN
}
}
],
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep gcloud projects',
' gostep gcloud locations',
' Optional args:\n'
' inside <workspace directory>'
]
},
'service': {
TYPE: BOOLEAN,
BRANCH: True,
LEAF: False,
'init': {
TYPE: TEXT,
BRANCH: False,
LEAF: True,
REQUIRED_FIELDS: {
'env': {
TYPE: TEXT,
},
'trigger': {
TYPE: TEXT
}
},
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep service init <service name> env <runtime environment> trigger <function invoker>',
' Optional args:\n'
' explains <project info>\n'
' inside <workspace directory>\n'
' location <gcloud region id>\n'
' version <project version>\n'
' allow-all'
]
},
VALIDATION_MESSAGES: [
'Error: Invalid command.\nUsage:',
' gostep service init <service name> env <runtime environment> trigger <function invoking type>',
' Optional args:\n'
' explains <project info>\n'
' inside <workspace directory>\n'
' location <gcloud region id>\n'
' version <project version>\n'
' allow-all'
]
},
VALIDATION_MESSAGES: [
'GOSTEP - Serverless templates provider for Google cloud platform',
'Version: %s' % GOSTEP_VERSION,
'Usage:',
' gostep auth init <gcloud service account name>',
' gostep auth inside <workspace directory>',
' gostep auth show',
' gostep base init <project name>',
' gostep base show',
' gostep deploy diff',
' gostep deploy <service name>',
' gostep gcloud locations',
' gostep gcloud projects',
' gostep service init <service name> env <runtime environment> trigger <function invoking type>'
]
}
| UTF-8 | Python | false | false | 7,309 | py | 10 | consts.py | 10 | 0.485018 | 0.484471 | 0 | 255 | 27.662745 | 110 |
Shiny-Su/Imperative-Programming | 7,172,595,386,207 | 1d4b967791cb86b9a36ac78d6b1e46cef1c662a3 | fe8a08634393d9db6874e073a016b7e3e0d76977 | /example/17.maze.py | 5773d74ece36a5dc49fc965eb7b2ddf58b299ae9 | []
| no_license | https://github.com/Shiny-Su/Imperative-Programming | 8a1ac0e8960e3d555d98b37eaeff69b582c927af | d5944a70b2506152c8b8047b6e2bf1587a4ac7fc | refs/heads/master | 2020-04-23T23:16:08.256961 | 2019-04-23T10:07:06 | 2019-04-23T10:07:06 | 171,530,395 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # filename = "c :\\ users \\ morse \\ documents \\ imp \\ maze .py"
# exec(compile(open(filename, "rb").read(), filename, 'exec'))
maze = """
#################
# # #
# ##### # ##### #
# # # # # #
### # # ##### # #
# # # #
##### ####### # #
# S# # # # #
# ### # ## # ## #
# # ## #
################# """[1:].split("\n")
# val = OrigMaze.find('S')
# val % len(maze[0]), val / len(maze[0])
row = -1
idx = -1
for i in range(0, len(maze)):
    idx = maze[i].find('S')
if idx != -1:
row = i
break
def solve(pos, notsolve, path):
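    # recursive depth-first search: cells already on `path` are skipped,
    # reaching the border of the maze means an exit was found and `path`
    # is returned; dead ends are popped off `path` into `notsolve`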
posx, posy = pos[0], pos[1]
if (posx, posy) in path:
return None
if posx == 0 or posy == 0 or posx == len(maze[0]) - 1 or posy == len(maze) - 1:
return path
path . append((posx, posy))
    if maze[posy + 1][posx] == ' ':
sol = solve((posx, posy + 1), notsolve, path)
if sol != None:
return sol
if maze[posy - 1][posx] == ' ':
sol = solve((posx, posy - 1), notsolve, path)
if sol != None:
return sol
if maze[posy][posx + 1] == ' ':
sol = solve((posx + 1, posy), notsolve, path)
if sol != None:
return sol
if maze[posy][posx - 1] == ' ':
sol = solve((posx - 1, posy), notsolve, path)
if sol != None:
return sol
notsolve.append(path.pop())
solutions = solve((idx, row), [], [])
# for i in solutions:
#     maze[i[1]] = maze[i[1]][0:i[0]] + "x" + maze[i[1]][i[0]+1:]
for i in range(1, len(solutions)):
baseSol = i == 0 and (solutions[i][0] + 1 == idx or solutions[i][0] - 1 == idx)
nextSol = i != 0 and (solutions[i][0] + 1 == solutions[i - 1][0] or
solutions[i][0] - 1 == solutions[i - 1][0])
y = solutions[i]
if baseSol or nextSol:
maze[y[1]] = maze[y[1]][0: y[0]] + "-" + maze[y[1]][y[0] + 1:]
else:
maze[y[1]] = maze[y[1]][0: y[0]] + "|" + maze[y[1]][y[0] + 1:]
print("\n".join(maze))
| UTF-8 | Python | false | false | 2,062 | py | 104 | 17.maze.py | 57 | 0.425024 | 0.396787 | 0 | 62 | 31.129032 | 87 |
postfactum/djangoapp | 5,248,450,075,903 | e689b017e3fc43758125957327447a560a43d65d | 6e4258e8e780f39a800ffdcc0b92189a3f0b15bd | /blog/views.py | 5fffd2928e5ebcaddc0d069ad39d8dbf70f2a9f1 | []
| no_license | https://github.com/postfactum/djangoapp | 80ec7e7eddad589d12b14eaed691313a81e3182f | 9d04414b9f64adc4626fa040b05d360d16ec63c7 | refs/heads/master | 2021-01-19T04:02:15.995642 | 2017-04-05T19:16:35 | 2017-04-05T19:16:35 | 87,083,027 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from .models import Post
# Create your views here.
def post_list(request):
post_list = Post.objects.all()
search = request.GET.get('search')
if search:
post_list = post_list.filter(text__icontains=search)
return render_to_response('blog/post_list.html', {'post_list': post_list, 'search': search})
def post_view(request, pk):
post = Post.objects.get(id=pk)
return render_to_response('blog/post_view.html', {'post': post})
def test(request):
name = request.user
    html = "<p>Hi, {}! Welcome to the custom page.</p>".format(name)
return HttpResponse(html)
| UTF-8 | Python | false | false | 702 | py | 4 | views.py | 1 | 0.679487 | 0.679487 | 0 | 31 | 21.645161 | 96 |
Frozen-Soul/Buyer-s-playground | 1,322,849,930,094 | 0cd65182efea20a9a3f0c7c4d39763209ab6a10c | 3d628bf49a53a64375a3efb4675babf86b6ca73c | /venv/lib/python3.6/site-packages/statsmodels/sandbox/archive/linalg_decomp_1.py | 7852bf83971f0c17db64129dc87a66cc41ca13fe | []
| no_license | https://github.com/Frozen-Soul/Buyer-s-playground | ce2928db6cf285ab03fc20455cccf9dd5055abdb | 63b45fa8ff4bce58c45efa5f4b9e12decba43299 | refs/heads/master | 2020-04-26T00:20:15.067506 | 2019-07-20T05:03:36 | 2019-07-20T05:03:36 | 173,174,272 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:287357d1310e902e7f7b5eb0ebc8434357a31a4d797b358aeef8a40f44b8834e
size 9056
| UTF-8 | Python | false | false | 129 | py | 2,705 | linalg_decomp_1.py | 2,201 | 0.883721 | 0.496124 | 0 | 3 | 42 | 75 |
malibustacy/tstoolbox | 15,925,738,769,174 | fc37bc3786985b5b9a1eb589aab8ac2e187c17b6 | d531fd5aa07e23997c2a7da27c8e88ffef3f204e | /tstoolbox/fill_functions.py | 54160fa7019983654d48f8d59b9827006b7c57ea | [
"BSD-3-Clause"
]
| permissive | https://github.com/malibustacy/tstoolbox | 59dbb5ad097cb5241deacdf75a5c6b46f83ff154 | 51ffa55a7c7130a6ce98bbb5d5c937ecc66480d0 | refs/heads/master | 2021-04-15T18:37:31.622113 | 2018-03-16T01:55:28 | 2018-03-16T01:55:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/sjr/beodata/local/python_linux/bin/python
"""A collection of filling routines."""
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
import pandas as pd
import mando
from mando.rst_text_formatter import RSTHelpFormatter
from . import tsutils
@mando.command(formatter_class=RSTHelpFormatter, doctype='numpy')
@tsutils.doc(tsutils.docstrings)
def fill(method='ffill',
interval='guess',
print_input=False,
input_ts='-',
start_date=None,
end_date=None,
columns=None):
"""Fill missing values (NaN) with different methods.
Missing values can occur because of NaN, or because the time series
is sparse. The 'interval' option can insert NaNs to create a dense
time series.
Parameters
----------
method : str
String contained in single quotes or a number that
defines the method to use for filling.
+-----------+---------------------------+
| ffill | assigns NaN values to |
| | the last good value |
+-----------+---------------------------+
| bfill | assigns NaN values to |
| | the next good value |
+-----------+---------------------------+
| 2.3 | any number: fills all NaN |
| | with this number |
+-----------+---------------------------+
| linear | will linearly interpolate |
| | missing values |
+-----------+---------------------------+
| spline | spline interpolation |
+-----------+---------------------------+
| nearest | nearest good value |
+-----------+---------------------------+
| zero | |
+-----------+---------------------------+
| slinear | |
+-----------+---------------------------+
| quadratic | |
+-----------+---------------------------+
| cubic | |
+-----------+---------------------------+
| mean | fill with mean |
+-----------+---------------------------+
| median | fill with median |
+-----------+---------------------------+
| max | fill with maximum |
+-----------+---------------------------+
| min | fill with minimum |
+-----------+---------------------------+
If a number will fill with that number.
interval : str
Will try to insert missing intervals. Can give any
of the pandas offset aliases, 'guess' (to try and figure the
interval), or None to not insert missing intervals.
{print_input}
{input_ts}
{start_date}
{end_date}
{columns}
"""
tsd = tsutils.common_kwds(tsutils.read_iso_ts(input_ts, dropna='all'),
start_date=start_date,
end_date=end_date,
pick=columns)
if print_input is True:
ntsd = tsd.copy()
else:
ntsd = tsd
ntsd = tsutils.asbestfreq(ntsd)
offset = ntsd.index[1] - ntsd.index[0]
predf = pd.DataFrame(dict(list(zip(tsd.columns, tsd.mean().values))),
index=[tsd.index[0] - offset])
postf = pd.DataFrame(dict(list(zip(tsd.columns, tsd.mean().values))),
index=[tsd.index[-1] + offset])
ntsd = pd.concat([predf, ntsd, postf])
if method in ['ffill', 'bfill']:
ntsd = ntsd.fillna(method=method)
elif method in ['linear']:
ntsd = ntsd.apply(pd.Series.interpolate, method='values')
elif method in ['nearest', 'zero', 'slinear', 'quadratic', 'cubic']:
from scipy.interpolate import interp1d
for c in ntsd.columns:
df2 = ntsd[c].dropna()
f = interp1d(df2.index.values.astype('d'), df2.values, kind=method)
slices = pd.isnull(ntsd[c])
ntsd[c][slices] = f(ntsd[c][slices].index.values.astype('d'))
elif method in ['mean']:
ntsd = ntsd.fillna(ntsd.mean())
elif method in ['median']:
ntsd = ntsd.fillna(ntsd.median())
elif method in ['max']:
ntsd = ntsd.fillna(ntsd.max())
elif method in ['min']:
ntsd = ntsd.fillna(ntsd.min())
else:
try:
ntsd = ntsd.fillna(value=float(method))
except ValueError:
raise ValueError("""
*
* The allowable values for 'method' are 'ffill', 'bfill', 'linear',
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'mean', 'median',
* 'max', 'min' or a number. Instead you have {0}.
*
""".format(method))
ntsd = ntsd.iloc[1:-1]
tsd.index.name = 'Datetime'
ntsd.index.name = 'Datetime'
return tsutils.print_input(print_input, tsd, ntsd, '_fill')
#@mando.command(formatter_class=RSTHelpFormatter)
def fill_by_correlation(method='move2',
maximum_lag=0,
interval='guess',
transform='log10',
choose_best='dtw',
print_input=False,
input_ts='-'):
"""Fill missing values (NaN) with different methods.
Missing values can occur because of NaN, or because the time series
is sparse. The 'interval' option can insert NaNs to create a dense
time series.
:param method: String contained in single quotes or a number that
defines the method to use for filling. 'move2': maintenance of
variance extension - 2
:param interval: Will try to insert missing intervals. Can give any
of the pandas offset aliases, 'guess' (to try and figure the
interval), or None to not insert missing intervals.
:param -p, --print_input: If set to 'True' will include the input
columns in the output table. Default is 'False'.
:param -i, --input_ts <str>: Filename with data in 'ISOdate,value'
format or '-' for stdin.
"""
tsd = tsutils.read_iso_ts(input_ts)
if print_input is True:
ntsd = tsd.copy()
else:
ntsd = tsd
ntsd = tsutils.asbestfreq(ntsd)
if transform == 'log10':
ntsd = pd.np.log10(ntsd)
firstcol = pd.DataFrame(ntsd.iloc[:, 0])
basets = pd.DataFrame(ntsd.iloc[:, 1:])
if choose_best is True:
firstcol = pd.DataFrame(ntsd.iloc[:, 0])
allothers = pd.DataFrame(ntsd.iloc[:, 1:])
collect = []
for index in list(range(maximum_lag + 1)):
shifty = allothers.shift(index)
testdf = firstcol.join(shifty)
lagres = testdf.dropna().corr().iloc[1:, 0]
collect.append(pd.np.abs(lagres.values))
collect = pd.np.array(collect)
bestlag, bestts = pd.np.unravel_index(collect.argmax(), collect.shape)
basets = pd.DataFrame(ntsd.iloc[:, bestts + 1].shift(bestlag))
single_source_ts = ['move1', 'move2', 'move3']
if method.lower() in single_source_ts:
if len(basets.columns) != 1:
raise ValueError("""
*
* For methods in {0}
* You can only have a single source column. You can pass in onlu 2
* time-series or use the flag 'choose_best' along with 'maximum_lag'.
* Instead there are {1} source time series.
*
""".format(single_source_ts, len(basets.columns)))
if method == 'move1':
ntsd = firstcol.join(basets)
dna = ntsd.dropna()
means = pd.np.mean(dna)
stdevs = pd.np.std(dna)
print(means[1] + stdevs[1]/stdevs[0]*means[0])
print(means, stdevs)
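if __name__ == '__main__':
    # Rough smoke test of fill(): 'test.csv' is only an illustrative
    # filename, assumed to hold 'ISOdate,value' rows as described in the
    # docstring above.
    print(fill(method='linear', input_ts='test.csv'))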
| UTF-8 | Python | false | false | 7,749 | py | 17 | fill_functions.py | 17 | 0.50884 | 0.503162 | 0 | 202 | 37.356436 | 79 |
GWFrank/ItC-Reversi-Agent | 14,620,068,703,352 | 2b07208579666686779b0878506b54d5ffbb3fbf | a39bd72350f6f81f92a508f588cf5eee96198a32 | /test_agents_multi_rounds.py | a453a47031e09e651b4be25b06dbb5b86c489b30 | []
| no_license | https://github.com/GWFrank/ItC-Reversi-Agent | 80aebfa764c944d6f1a291ff45ba6a56dca26d5b | 26c4fb1892f2e21f37844f5a87bd76a948d0157e | refs/heads/main | 2023-03-09T07:01:36.376641 | 2021-02-22T10:22:25 | 2021-02-22T10:22:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
import pickle
from agent.GWFrank_func.match_agents import matchup, matchup_mp, playgame
from agent.GWFrank_func.test_agent_class import RandomTestAgent, MinimaxCountTestAgent, PaperTestAgent
from agent.GWFrank_func.test_agent_class import MinimaxTestAgent, LittleRandomTestAgent, MinimaxModTestAgent
from agent.GWFrank_func.test_agent_class import NEATTestAgent, NEATModTestAgent
from agent.GWFrank_func.eval_funcs import posEval, posEvalEndgameVariation, enhancedPosEval
RTA = RandomTestAgent
MTA = MinimaxTestAgent
LRTA = LittleRandomTestAgent
MMTA = MinimaxModTestAgent
NTA = NEATTestAgent
NMTA = NEATModTestAgent
MCTA = MinimaxCountTestAgent
PTA = PaperTestAgent
nn_file_path = "agent/GWFrank_func/best_trained_with_randomagent.pickle"
with open(nn_file_path, "rb") as f:
nn = pickle.load(f)
if __name__ == "__main__": # Don't delete this line, it's needed for mp to work
# start = time.time() # timer
rounds = 10
core_cnt = os.cpu_count()//2
# core_cnt = 20
balanced = True
depth = 4
random_step = 4
random_agent = RTA()
basic_mm_agent = MTA(posEvalEndgameVariation, depth)
random_mm_agent = LRTA(posEvalEndgameVariation, depth, 0.03)
neat_mm_agent = NTA(nn, depth)
mod_mm_agent = MMTA(posEvalEndgameVariation, depth, random_step)
mod_neat_agent = NMTA(nn, depth, random_step)
mm_cnt_agent = MCTA(posEvalEndgameVariation, depth)
paper_mm_agent = PTA(enhancedPosEval, depth)
agent1 = random_agent
agent2 = paper_mm_agent
matchup_mp(agent1, agent2, rounds, core_cnt, balanced)
# matchup(agent1, agent2, rounds)
print("="*20)
for a in [agent1, agent2]:
W, L, D = a.win, a.loss, a.draw
name = a.agent_name()
print(f"{name} has record {W}-{L}-{D}")
print("="*20)
# end = time.time() # timer
# print(f"test finish in {end-start:.2f}s") # timer | UTF-8 | Python | false | false | 1,911 | py | 20 | test_agents_multi_rounds.py | 16 | 0.700157 | 0.688121 | 0 | 62 | 29.83871 | 108 |
jahedev/euler | 1,382,979,494,177 | b3c46d21440f33e776f6d67ba2deda821c637c62 | eb23644a86a4ffa1fb2dcd612816e98709105045 | /prime_xp.py | 5fd78998fdb2f14183f2866898805122f76687dd | []
| no_license | https://github.com/jahedev/euler | d45837774d394bd06c94e77e0e64c47d6592b504 | 27904498a8ccd787628644c9df8e5c31ff834751 | refs/heads/master | 2022-02-26T03:27:54.241516 | 2019-09-14T07:00:13 | 2019-09-14T07:00:13 | 206,631,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
def pbar(string, curr, cap):
print('\r %s %.2f%s \r' % (string,(curr/cap)*100,'%'), end='\r')
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
### START HELPING FUNCTIONS ###
# find smallest factor of any number, returns -1 if not found.
def smallest_factor(n):
    # try dividing 'n' by each number from 2 up to half the size of 'n';
# if the number is divisible with no remainder, then return that as the smallest factor of 'n'.
for i in range(2, int(n/2) + 1):
if n % i == 0:
return i
return -1
# find all the factors of a number
def get_factors(n):
last_factor = smallest_factor(n) # last successful factor
current_factor = last_factor # current factor will end the loop, when it equals -1
factors = [] # where all the factors will be stored
    last_divided_number=n # running quotient: n divided by the factors found so far
# smallest_factor(n) will return -1, when there is no smallest factor on 'n'
while current_factor != -1:
last_factor = current_factor # save the last successful factor in last_factor
factors.append(last_factor)
last_divided_number /= last_factor
current_factor = smallest_factor(last_divided_number)
# multiply every factor, and divide that in 'n' to get the largest factor
factors_multiplied=1
for factor in factors:
factors_multiplied *= factor
largest_factor = int(n / factors_multiplied)
factors.append(largest_factor)
return factors
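# e.g. get_factors(12) -> [2, 2, 3]; a prime p has no factor up to p/2,
# so get_factors(p) -> [p]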
### END HELPING FUNCTIONS ###
def is_prime(n):
    # a prime's only factorization is itself, i.e. get_factors(p) == [p]
    f = get_factors(n)
    return n > 1 and f == [n]
print(is_prime(2))
"""
def is_prime(n):
# We know 1 is not a prime number
if n == 1:
return False
# We store the number of factors in this variable
factors = 0
# This will loop from 1 to n
for i in range(1, n+1):
# Check if `i` divides `n`, if yes then we increment the factors
if n % i == 0:
factors += 1
# If total factors are exactly 2
if factors == 2:
return True
return False
"""
def check_primes(prime_list):
not_primes = []
count = 0
for num in prime_list:
if not is_prime(num):
not_primes.append(num)
count += 1
printProgressBar(count, len(prime_list), prefix = 'Checking Primes:', suffix = '', length = 50)
if len(not_primes) == 0:
print('All numbers in list are primes.')
else:
print('The following numbers are not prime:')
print(not_primes)
for i in range(2,20000000):
printProgressBar(i, 20000000, prefix = 'Checking Primes:', suffix = '', length = 50)
print(is_prime(2)) | UTF-8 | Python | false | false | 3,622 | py | 21 | prime_xp.py | 21 | 0.610221 | 0.593923 | 0 | 106 | 33.160377 | 106 |
NateWeiler/Resources | 12,412,455,527,748 | e773796f4f551458f380919a5903c6b4d879da8f | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/pyinstaller/tests/functional/specs/several-scripts/main-script2.py | caceedb4bd77a8a0607b023f202fc8b07c45bc04 | []
| no_license | https://github.com/NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | false | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | 2021-12-09T12:39:04 | 2022-09-08T15:20:18 | 2,434,051 | 1 | 0 | 32 | null | false | false | version https://git-lfs.github.com/spec/v1
oid sha256:5bd2da445c4e3bc03b6f8d204e9f17d88d7cbf37b6297cb63f6ef53f8d60878f
size 95
| UTF-8 | Python | false | false | 127 | py | 36,207 | main-script2.py | 16,386 | 0.88189 | 0.543307 | 0 | 3 | 41.333333 | 75 |
marvpaul/ContentManagementTasks | 4,569,845,209,627 | fbb10d8c5d3f33ff73484c3ae93239ad53353708 | f24edead7aac1c8826effb704d1f5272eaca2a01 | /Notebook1/Euler22.py | a0be44786425a250a0afb36278a1f3deb674decb | []
| no_license | https://github.com/marvpaul/ContentManagementTasks | dfa0093ca065aba84d1aff5789dbe373023e8324 | a2ce626d7361095c3b556a8b8e392dc800c437d6 | refs/heads/master | 2021-09-03T17:46:43.028761 | 2018-01-10T20:52:59 | 2018-01-10T20:52:59 | 107,541,830 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def getScore(name, position):
"this function calculates the score for a name depending on chars and the position in a name list"
score = 0
for char in list(name):
score += ord(char) - ord('A') + 1
return score
def readAndProcessNamelist(path):
"This function reads and process a given name list located at path"
with open(path, 'r') as f:
line = f.read()
line = line.replace('\"', '')
nameArray = line.split(',')
nameArray.sort()
return nameArray
#Euler 22
nameArray = readAndProcessNamelist('p022_names.txt')
score = 0
#Iterate through all the names in the given name list
for idx, name in enumerate(nameArray):
score += getScore(name, idx+1) * (idx+1)
#Here we go, print it out!
print(score) | UTF-8 | Python | false | false | 758 | py | 41 | Euler22.py | 28 | 0.664908 | 0.651715 | 0 | 27 | 27.111111 | 102 |
perrozzi/cmg-cmssw | 12,893,491,830,885 | e88c6860b8155af3e80a524e7e5ea8392e49de64 | 5330918e825f8d373d3907962ba28215182389c3 | /FastSimulation/Tracking/python/IterativeFifthTrackFilter_cff.py | b679063f4de3503f957dbfae4d35565454f79f50 | []
| no_license | https://github.com/perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | true | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | 2016-11-03T08:01:49 | 2017-05-11T22:43:16 | 959,175 | 0 | 0 | 1 | C++ | null | null | import FWCore.ParameterSet.Config as cms
import RecoTracker.FinalTrackSelectors.selectHighPurity_cfi
fifthStep = RecoTracker.FinalTrackSelectors.selectHighPurity_cfi.selectHighPurity.clone(
src = 'iterativeFifthTrackMerging',
##keepAllTracks = True,
copyExtras = True,
copyTrajectories = True,
chi2n_par = 0.25,
res_par = ( 0.003, 0.001 ),
##minNumberLayers = 6,
minNumberLayers = 4,
minNumber3DLayers = 2,
maxNumberLostLayers = 0,
d0_par1 = ( 1.2, 4.0 ),
dz_par1 = ( 1.1, 4.0 ),
d0_par2 = ( 1.2, 4.0 ),
dz_par2 = ( 1.1, 4.0 )
)
fifthfilter = cms.EDProducer("QualityFilter",
TrackQuality = cms.string('highPurity'),
recTracks = cms.InputTag("fifthStep")
)
iterativeFifthTrackFiltering = cms.Sequence(fifthStep*fifthfilter)
| UTF-8 | Python | false | false | 737 | py | 2,731 | IterativeFifthTrackFilter_cff.py | 2,216 | 0.7327 | 0.679783 | 0 | 27 | 26.222222 | 88 |
openstack/horizon | 1,236,950,631,674 | e1a327a6b91fa4f69fa4d2cc0a6f499731577cf3 | 69d8d91954f6623f3674d52d734d589f72383628 | /openstack_dashboard/dashboards/project/api_access/forms.py | 9caf3d974887e0112cb70028fe7588381480643e | [
"Apache-2.0"
]
| permissive | https://github.com/openstack/horizon | d031cebe126c06ad9717bbc52790b3d890e8661e | 7896fd8c77a6766a1156a520946efaf792b76ca5 | refs/heads/master | 2023-09-04T06:57:58.069907 | 2023-09-01T20:17:10 | 2023-09-01T20:17:10 | 2,665,166 | 1,060 | 1,175 | Apache-2.0 | false | 2023-08-07T02:33:44 | 2011-10-28T13:12:05 | 2023-07-28T08:39:05 | 2023-08-06T07:39:56 | 354,181 | 1,303 | 1,250 | 0 | Python | false | false | # Copyright 2016 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
def get_ec2_credentials(request):
if not policy.check((("identity", "identity:ec2_list_credentials"),),
request):
return None
project_id = request.user.project_id
all_keys = api.keystone.list_ec2_credentials(request,
request.user.id)
keys = [x for x in all_keys if x.tenant_id == project_id]
if not keys:
return None
return {'ec2_access_key': keys[0].access,
'ec2_secret_key': keys[0].secret}
class RecreateCredentials(forms.SelfHandlingForm):
def handle(self, request, context):
try:
credential = get_ec2_credentials(request)
if credential:
api.keystone.delete_user_ec2_credentials(
request,
request.user.id,
credential['ec2_access_key'])
except Exception:
exceptions.handle(
request, _('Unable to recreate ec2 credentials. '
'Failed to delete ec2 credentials.'))
return False
try:
api.keystone.create_ec2_credentials(
request,
request.user.id,
request.user.project_id)
message = _('Successfully recreated ec2 credentials.')
messages.success(request, message)
return True
except Exception:
exceptions.handle(
request, _('Unable to recreate ec2 credentials. '
'Failed to create ec2 credentials.'))
return False
| UTF-8 | Python | false | false | 2,435 | py | 2,064 | forms.py | 1,011 | 0.608624 | 0.598768 | 0 | 68 | 34.808824 | 78 |
AIPHES/emnlp19-moverscore | 2,259,152,839,797 | c3e36c54ef57e2107006dceb7bea5b1346b32393 | 3c000b258cf5c3b98818a6b7ff3d76a096da34fe | /webservice/server/server/summ_eval/server/zmq_decor.py | d41a3049fd3ea6f019b9cea5b6a0d9d2a694cf7e | [
"MIT"
]
| permissive | https://github.com/AIPHES/emnlp19-moverscore | d01c0559f0ec1164c338721ba86135d8ff7d51ed | 0459a3b3b3bd73baa0cc515a355228ee5a2887e0 | refs/heads/master | 2023-01-22T22:20:48.918011 | 2023-01-18T22:20:17 | 2023-01-18T22:20:17 | 204,743,853 | 184 | 38 | MIT | false | 2023-01-18T22:20:19 | 2019-08-27T16:25:15 | 2023-01-18T12:36:02 | 2023-01-18T22:20:17 | 7,376 | 164 | 23 | 11 | Python | false | false | from contextlib import ExitStack
from zmq.decorators import _Decorator
__all__ = ['multi_socket']
from functools import wraps
import zmq
class _MyDecorator(_Decorator):
def __call__(self, *dec_args, **dec_kwargs):
kw_name, dec_args, dec_kwargs = self.process_decorator_args(*dec_args, **dec_kwargs)
num_socket_str = dec_kwargs.pop('num_socket')
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
num_socket = getattr(args[0], num_socket_str)
targets = [self.get_target(*args, **kwargs) for _ in range(num_socket)]
with ExitStack() as stack:
for target in targets:
obj = stack.enter_context(target(*dec_args, **dec_kwargs))
args = args + (obj,)
return func(*args, **kwargs)
return wrapper
return decorator
class _SocketDecorator(_MyDecorator):
def process_decorator_args(self, *args, **kwargs):
"""Also grab context_name out of kwargs"""
kw_name, args, kwargs = super(_SocketDecorator, self).process_decorator_args(*args, **kwargs)
self.context_name = kwargs.pop('context_name', 'context')
return kw_name, args, kwargs
def get_target(self, *args, **kwargs):
"""Get context, based on call-time args"""
context = self._get_context(*args, **kwargs)
return context.socket
def _get_context(self, *args, **kwargs):
if self.context_name in kwargs:
ctx = kwargs[self.context_name]
if isinstance(ctx, zmq.Context):
return ctx
for arg in args:
if isinstance(arg, zmq.Context):
return arg
# not specified by any decorator
return zmq.Context.instance()
def multi_socket(*args, **kwargs):
return _SocketDecorator()(*args, **kwargs)
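if __name__ == '__main__':
    # Minimal usage sketch (assumes pyzmq is installed): the attribute name
    # 'num_concurrent_socket' is arbitrary; ``num_socket`` names the instance
    # attribute that holds how many sockets to create and inject.
    class _Demo(object):
        num_concurrent_socket = 2

        @multi_socket(zmq.PUSH, num_socket='num_concurrent_socket')
        def run(self, *sockets):
            print('decorator injected %d sockets' % len(sockets))

    _Demo().run()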
| UTF-8 | Python | false | false | 1,929 | py | 26 | zmq_decor.py | 20 | 0.580093 | 0.579575 | 0 | 61 | 30.622951 | 101 |
vishnuvardhanmanne/CS5590-Python-DL | 7,997,229,114,468 | 7dccb14d68d48c9428cd4ce0349ada33ab2d40da | 7cf9d541fad5b1cf34e7f81be4be2cec21161844 | /ICP-2/3rd_solutions.py | d7a4c652ebb7671e4a2e229dff686bdc67058cde | []
| no_license | https://github.com/vishnuvardhanmanne/CS5590-Python-DL | 76de0e5a82a2b694d0cdfeaf7d38eb8baa2078b7 | 824d4e99fa7adf96ef5fb0591554ba9da1bbc3ac | refs/heads/master | 2020-12-31T10:49:05.740448 | 2020-05-13T19:14:12 | 2020-05-13T19:14:12 | 239,008,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import *
def word_count(file_name):
with open(file_name) as f:
return Counter(f.read().split())
print("The number of words in the given file is:",word_count("text.txt")) | UTF-8 | Python | false | false | 198 | py | 36 | 3rd_solutions.py | 17 | 0.686869 | 0.686869 | 0 | 5 | 38.8 | 73 |
jvinetz/super-octo-dollop | 3,539,053,089,392 | 326ff96f3bebd9190dfc6e42a7295fdece4a5633 | 9b9853b0d725c45dc1ead9cc6014c4d99e083a4c | /main_driver.py | 76e380f567e101a0e61a3711286071d4ecb3b456 | []
| no_license | https://github.com/jvinetz/super-octo-dollop | 052995b817538a17b0818aeb0520e36601729676 | ed4fb2aadbb2f1d198ea08ad40fbe208c6a01746 | refs/heads/master | 2020-09-10T07:42:19.593623 | 2019-12-23T10:49:02 | 2019-12-23T10:49:02 | 221,688,163 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import re
import sys
import pandas as pd
from geopy.geocoders import Nominatim
from DB import DB
from log import Logger
from driver_class import Driver
from scraper_class import Scraper
from db_con import host, user, passwd, use_pure, database, buffered
URL = "https://www.waytostay.com/en"
CSV = r'csv/data.csv'
DB = DB(host, user, passwd, use_pure, database, buffered)
con = DB.my_db
log = Logger()
scraper = Scraper()
def update_db(user_city):
"""Updates the database with the information the user placed as input"""
url = URL
scraper = Scraper()
dr = scraper.driver
soup = dr.get_info(url)
web_page = scraper.find_city(soup, user_city)
city_soup = dr.get_info(web_page)
num_pages = scraper.find_num_pages(city_soup, log)
df = create_table(num_pages, web_page, dr, city_soup)
    df['sleeps'] = df['sleeps'].apply(lambda x: int(x))
    df['area_sqm'] = df['area_sqm'].apply(lambda x: int(x))
    df['bedrooms'] = df['bedrooms'].apply(lambda x: int(x) if x != 'studio' else 0)
    df['bathroom'] = df['bathroom'].apply(lambda x: int(x))
    df['price'] = df['price'].apply(lambda x: int(x))
DB.update_city(user_city, df)
dr.close()
dr.quit()
return df
def create_table(num_pages, web_page, driver, city_soup):
"""Scraps the input page and returns a dataframe with the information"""
arr = []
for i in range(num_pages):
city_page = city_soup.find_all('div', class_="tile")
for city in city_page:
price = re.search(r'(>)([€£]\w*\s[0-9]*)<', str(city)).group(2)
page_link = city.a['href']
detail = city.p.text.split()
if detail[1] == 'sqm':
detail = [detail[0]] + ['0', '0'] + detail[1:]
dic = {"city": web_page, "page_link": page_link, 'sleeps': detail[1], 'area_sqm': detail[2],
'bedrooms': detail[4], 'bathroom': detail[6], 'price': price[2:], 'currency': price[0]}
arr.append(dic)
if num_pages != 1:
city_soup = driver.next_page(i, web_page)
df = pd.DataFrame(arr)
return df
def get_results(args, df):
"""Filters the dataframe with the ranges selected by the user"""
if args.p:
if args.argp2:
df = df[df['price'] < args.argp2]
df = df[df['price'] > args.argp1]
if args.s:
if args.args2:
df = df[df['sleeps'] < args.args2]
df = df[df['sleeps'] > args.args1]
if args.a:
if args.arga2:
df = df[df['area_sqm'] < args.arga2]
df = df[df['area_sqm'] > args.arga1]
if args.be:
if args.argbe2:
df = df[df['bedrooms'] < args.argbe2]
df = df[df['bedrooms'] > args.argbe1]
if args.ba:
if args.argba2:
df = df[df['bathroom'] < args.argba2]
df = df[df['bathroom'] > args.argba1]
return df
def parser():
"""The function calls the scraper to scrap and shows results according to the parameters the user selected as
inputs """
parser = argparse.ArgumentParser(
description='Must insert argument -G for global update or --city "city_name" for city update. '
'Then insert the rest of the arguments if wanted')
parser.add_argument('-G', action="store_true", help='Global update')
parser.add_argument('--city', help='city')
parser.add_argument('-p', action="store_true", help='price')
parser.add_argument('--argp1', nargs='?', default=0, type=int, help='price lower limit')
parser.add_argument('--argp2', nargs='?', type=int, help='price higher limit')
parser.add_argument('-s', action="store_true", help='sleeps')
parser.add_argument('--args1', nargs='?', default=0, type=int, help='sleeps lower limit')
parser.add_argument('--args2', nargs='?', type=int, help='sleeps higher limit')
parser.add_argument('-a', action="store_true", help='area')
parser.add_argument('--arga1', nargs='?', default=0, type=int, help='area lower limit')
parser.add_argument('--arga2', nargs='?', type=int, help='area higher limit')
parser.add_argument('-be', action="store_true", help='bedrooms')
parser.add_argument('--argbe1', nargs='?', default=0, type=int, help='bedrooms lower limit')
parser.add_argument('--argbe2', nargs='?', type=int, help='bedrooms higher limit')
parser.add_argument('-ba', action="store_true", help='bathrooms')
parser.add_argument('--argba1', nargs='?', default=0, type=int, help='bathrooms lower limit')
parser.add_argument('--argba2', nargs='?', type=int, help='bathrooms higher limit')
parser.add_argument('--curr', action="store_true", help='currency')
args = parser.parse_args()
return args, parser
def get_coords(city):
"""The function gets the latitud and longitud for the input city"""
geolocator = Nominatim(user_agent="ITC_DM")
location = geolocator.geocode(city, timeout=5)
latitude = location.latitude
longitude = location.longitude
return latitude, longitude
def main():
args, par = parser()
if args.city:
update_db(args.city)
df = DB.get_query_df("""SELECT * FROM place""")
results = get_results(args, df)
print("The city has been updated/created in the database")
print(results)
elif args.G:
df = scraper.global_update()
DB.update_global(df)
print(df)
print("The database has been created/updated")
else:
print(
"\nThere were not enough parameters to scrap, please be sure to input at least the '-G' or '--city' "
"parameters\n")
par.print_help()
sys.exit(1)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,632 | py | 19 | main_driver.py | 16 | 0.608989 | 0.600462 | 0 | 159 | 34.402516 | 113 |
fishesandchip/scrabble-application | 14,680,198,225,168 | 1d554e8126742200b5e0f1e6b92c74fa2df5b8a0 | 4fc8266f55e6d88dfd1c260851a1e0c83046b5b3 | /Main.py | adf45a32be38c4b76de67cfa1c5b4d1eecbee673 | []
| no_license | https://github.com/fishesandchip/scrabble-application | d008622277bb1f11a3bef70dd767daa75f093ab3 | 971a8243b69f0ba89f57d46375fa534598b3a07b | refs/heads/master | 2021-01-17T14:40:31.851724 | 2016-07-30T17:14:07 | 2016-07-30T17:14:07 | 55,259,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui
from GUI import Main
# ----------------------------#
# Main scrabble loop #
# ----------------------------#
def main():
app = QtGui.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 354 | py | 2 | Main.py | 2 | 0.457627 | 0.451977 | 0 | 21 | 15.666667 | 38 |
brightrif/learningDJ | 7,739,531,095,859 | e24754d713e359f18e3d6d0e19ce60afc421a2c9 | cfb3921a4f289b8c8454ee2b7d345b5a077b9453 | /event/migrations/0002_auto_20201115_1414.py | 2229cc2a26282af0f04b58576b2eb9850648fff4 | []
| no_license | https://github.com/brightrif/learningDJ | 31aa849933e675b354d63a5c86b37e37ff9cd9ca | f7a95d251bf6ca05caebfb9b8e96a1ddebe2259f | refs/heads/master | 2023-01-18T21:31:46.724244 | 2020-11-18T12:19:19 | 2020-11-18T12:19:19 | 312,971,512 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-11-15 11:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('event', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='endtime',
new_name='endTime',
),
migrations.RenameField(
model_name='event',
old_name='starttime',
new_name='startTime',
),
migrations.AddField(
model_name='event',
name='createdBy',
field=models.ForeignKey(default='2', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='created by'),
),
migrations.AddField(
model_name='event',
name='createdTime',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='event',
name='eventdescription',
field=models.CharField(max_length=300, null=True, verbose_name='Event Description'),
),
migrations.AlterField(
model_name='event',
name='eventlocation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.eventlocation', verbose_name='Event Location'),
),
migrations.AlterField(
model_name='event',
name='eventname',
field=models.CharField(max_length=200, verbose_name='Event Name'),
),
migrations.AlterField(
model_name='event',
name='eventorganizer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.organizer', verbose_name='Event Orgainzer'),
),
migrations.AlterField(
model_name='event',
name='eventtype',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.eventtype', verbose_name='Event Type'),
),
]
| UTF-8 | Python | false | false | 2,276 | py | 18 | 0002_auto_20201115_1414.py | 14 | 0.579525 | 0.568102 | 0 | 62 | 34.709677 | 150 |
jnguyen4103/pcpp-assistant | 8,950,711,854,940 | a6c518e51b0b13d8400a4bc8e7d70ef1eeab224f | a9302cf336814e558063b31753863926549c4ac8 | /backend/data/csv_sort.py | c3ba6d30fe19f70021fdc3a2642208c6eda45382 | []
| no_license | https://github.com/jnguyen4103/pcpp-assistant | 50328fabaaf8af170421abc02a5ab329c01223b4 | c716171625fe4de025709aa51fb9a0f0e3c7d41f | refs/heads/develop | 2023-04-09T19:59:35.803897 | 2021-04-22T18:40:36 | 2021-04-22T18:40:36 | 331,139,045 | 1 | 0 | null | false | 2021-04-22T08:23:48 | 2021-01-19T23:38:51 | 2021-04-20T22:37:37 | 2021-04-22T08:23:47 | 22,782 | 1 | 0 | 8 | Python | false | false | import csv
import sys
import operator
reader = csv.reader(open("case.csv"), delimiter=",")
sortedlist = sorted(reader, key=operator.itemgetter(0), reverse=False)
with open('sorted-case.csv', 'w') as f:
    names = ['columnName_1', 'columnName_2', 'columnName_3', 'columnName_4', 'columnName_5', 'columnName_6',
             'columnName_7', 'columnName_8', 'columnName_9', 'columnName_10', 'columnName_11', 'columnName_12']
    writer = csv.writer(f)
    writer.writerow(names)  # write the header row before the sorted data
    for row in sortedlist:
        writer.writerow(row)
| UTF-8 | Python | false | false | 508 | py | 66 | csv_sort.py | 45 | 0.667323 | 0.635827 | 0 | 13 | 38.076923 | 111 |
755/nwwhois | 18,004,502,919,093 | 447300309fc6d07a74b56d92caf05eeea1d6efdb | c73ddb75c19da4ab5b76a2d45b80d998588f7776 | /whois/__init__.py | ecd2a7f79c42dc7a5fea42ccb239a97cf64a92b5 | []
| no_license | https://github.com/755/nwwhois | d0c996ba7804beaa844f07c59e1b6aaf290b87ea | f91ddcc10aa1b9113386fdd69188a69ab28badc0 | refs/heads/master | 2016-09-06T12:41:17.174152 | 2014-06-16T14:51:02 | 2014-06-16T14:51:02 | 20,839,278 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'nazarov'
| UTF-8 | Python | false | false | 23 | py | 4 | __init__.py | 4 | 0.565217 | 0.565217 | 0 | 1 | 22 | 22 |
eriknw/cygraphblas | 19,542,101,222,609 | 81dd8324145e900009575e92e92405de84b276fe | 9927a8826eb8f0bbe0b46b3d148282d7f2b9e4ed | /cygraphblas/lib/constants/desc_field/ss.py | d534f771b77e9a4341a21ee582e6c7658fb612c8 | [
"Apache-2.0"
]
| permissive | https://github.com/eriknw/cygraphblas | 2db07e4a3bc490ad4c78e78069c12f64d4c63888 | 81ae37591ec38aa698d5f37716464a6c366076f9 | refs/heads/master | 2023-04-07T02:00:59.999578 | 2020-09-03T21:28:30 | 2020-09-03T21:28:30 | 292,686,757 | 0 | 0 | Apache-2.0 | true | 2020-09-03T21:47:52 | 2020-09-03T21:47:51 | 2020-09-03T21:47:24 | 2020-09-03T21:38:38 | 0 | 0 | 0 | 0 | null | false | false | from cygraphblas_ss.lib.constants.desc_field import *
| UTF-8 | Python | false | false | 54 | py | 80 | ss.py | 27 | 0.814815 | 0.814815 | 0 | 1 | 53 | 53 |
kdaivam/PythonPrep | 17,901,423,702,823 | 1c83ce78f9e7cfd1db5af23120e8e57302fe7cdf | 82be17841875e686c2ce25faefa41c8a603ac12e | /Leetcode/pairWithEqualSums.py | a760c6edaac79c7d69e40423ac968a03ca531d10 | []
| no_license | https://github.com/kdaivam/PythonPrep | 338022ce8847b2ab221b4ec7bf134a9c962cb3e2 | 1e4d654ee9b44ed63a287d67a802a04dca4f2d42 | refs/heads/master | 2020-03-08T17:01:28.437958 | 2019-06-10T05:46:27 | 2019-06-10T05:46:27 | 128,257,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def pairWithEqualSums(a):
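    """Bucket every pairwise sum of `a` in a dict keyed by the sum and
    return the buckets holding more than one pair, i.e. pairs with equal sums."""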
tmp = {}
for i in range(len(a)):
for j in range(i+1, len(a)):
k = a[i]+a[j]
if k in tmp:
tmp[k].append((a[i],a[j]))
else:
tmp[k] = [(a[i],a[j])]
result = []
for k, v in tmp.items():
if len(v) > 1:
result.append(v)
return result
a = [9, 4, 3, 1, 7, 12]
print(pairWithEqualSums(a)) | UTF-8 | Python | false | false | 438 | py | 142 | pairWithEqualSums.py | 142 | 0.408676 | 0.388128 | 0 | 17 | 23.882353 | 42 |
botanicalpilot/bootcamp | 3,418,793,973,495 | 7c03f3742fb13f381f2e0e1bf429002da684e167 | c98b719d95f3e3cd33e9c4b705a03232d3185733 | /practice3_8.py | 805f44bac6f062f1bfa8e30cfe4748d99becc4d3 | [
"MIT"
]
| permissive | https://github.com/botanicalpilot/bootcamp | 42b24d120c82034b35ae92e1484420c8e5898376 | caf580e38d4284f679274f6996fa3ec1cf64e38d | refs/heads/master | 2020-08-04T19:14:20.012066 | 2019-11-16T04:23:37 | 2019-11-16T04:23:37 | 212,249,505 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def combine(listA, listB):
    #create a list of None placeholders, one slot for every item in both input lists
combined = [None]*(len(listA) + len(listB))
#assign every other item in combined list to listA items starting at 0
combined[::2] = listA
#assign every other item in combined list to listB items starting at 1
combined[1::2] = listB
return combined
print(combine([1, 2, 3], ['a', 'b', 'c']))
| UTF-8 | Python | false | false | 455 | py | 61 | practice3_8.py | 55 | 0.659341 | 0.641758 | 0 | 10 | 44.1 | 102 |
tt-n-walters/saturday-python | 7,206,955,127,180 | a35ea42a392886f719af60110bbe8373c4411ff7 | 5504f5488f9b2a07c600b556f6a14cb6f08c9b12 | /dictionary.py | a617bb3d44603e18e7e1dd0ba66159ec0753f0f3 | []
| no_license | https://github.com/tt-n-walters/saturday-python | 4087028e24ff1c3e80b705b5a49c381f02bc1d84 | 2ad53feb45b5a0e21b927bce25d52c8d2c679793 | refs/heads/master | 2020-12-26T21:46:18.240026 | 2020-04-18T17:40:23 | 2020-04-18T17:40:23 | 237,655,096 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
words = {
"hi": 1001,
"goodbye": 1002,
"idiot": 9999
}
words_to_check = ["hi", "goodbye", "hello", "idiot", "moron"]
for word in words_to_check:
    if word in words:  # membership test; .get() would misreport a stored falsy value such as 0
print(f"Yes they have used the word '{word}'")
else:
print(f"No, they haven't used the word '{word}'")
| UTF-8 | Python | false | false | 308 | py | 34 | dictionary.py | 32 | 0.555195 | 0.516234 | 0 | 14 | 20.928571 | 61 |
josteinstraume/python-for-everybody | 8,091,718,431,493 | 6dfc2cdfe57a35884ad0261e38f2277aa3e270c0 | 50feab01b5ad6aa5868f62a2405acfad15b185c6 | /peer_grade.py | 42ce901ce03e773e973ccb3cb1320eccc92b6533 | []
| no_license | https://github.com/josteinstraume/python-for-everybody | f4766d5d689bf032d2f0aa89c9ece7b98d43c4bf | b07333bd4ad7d986065112ba67575e7d3a5abae7 | refs/heads/master | 2021-01-11T15:17:49.213413 | 2017-01-29T00:32:14 | 2017-01-29T00:32:14 | 80,320,275 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print "'ELLO POPPET" | UTF-8 | Python | false | false | 20 | py | 13 | peer_grade.py | 12 | 0.75 | 0.75 | 0 | 1 | 20 | 20 |
cdncdn90/brana | 12,678,743,478,400 | 3102128180f6b8b7b07b4fc1b1b359de7e9b72cc | e45fc27e1e49454f98629ca7dd135459ec27b2f0 | /brana/view.py | 6c31e3cae1d32c86b504b1dbf069ad98ad26ba58 | []
| no_license | https://github.com/cdncdn90/brana | c90bd43676344575d162aebb435a01bade16d971 | 1823b60b85ff610a3da504243498e7181fd6c3cf | refs/heads/master | 2020-05-24T07:23:43.788998 | 2019-05-20T03:34:18 | 2019-05-20T03:34:18 | 187,158,847 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http import HttpResponse
def hello(request):
return HttpResponse("Hello world ! ")
def balabala(req):
return HttpResponse("balabala")
| UTF-8 | Python | false | false | 158 | py | 1 | view.py | 1 | 0.727848 | 0.727848 | 0 | 9 | 16.555556 | 41 |
yuanpengX/learngit | 9,560,597,239,137 | e86088f5b522cf60095c316adc591942a4bdc37d | 9702d504cc099e4c22f49e0f60febd375469e4df | /python_log/day_08.py | 923240a2a94b9f6fabae261e4150b22222cf858a | []
| no_license | https://github.com/yuanpengX/learngit | 998e6683bf9d269e94132c88b4d93551247162bd | fa72bf07a4e9d487bbf65b8c4460c4574a2ea005 | refs/heads/master | 2016-09-05T23:30:30.965141 | 2015-05-20T04:22:51 | 2015-05-20T04:22:51 | 29,565,429 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
def sef(pattern,alter,filename1,filename2):
    # simple find-and-replace: copy filename1 into filename2, substituting
    # every occurrence of pattern with alter. (The original read the whole
    # file with one read() and compared it to the pattern, so it never matched.)
    print pattern
    try:
        fp1 = open(filename1,'r')
        fp2 = open(filename2,'w')
        for line in fp1:
            fp2.write(line.replace(pattern,alter))
        fp1.close()
        fp2.close()
    except IOError as e:
        print 'something went wrong:', e
sef('hello','nonono','test1','test2');
| UTF-8 | Python | false | false | 374 | py | 50 | day_08.py | 32 | 0.620321 | 0.569519 | 0 | 18 | 19.722222 | 43 |
dacianmujdar/parking-django | 18,648,748,005,239 | 445e767a545cbf43afbeba6cf1c9158328ffd9b2 | ff8881bcce3e4eb54b9294ec97789a3baf336ecf | /parking_project/parking_space/serializers.py | 3f39f2bc142a4b3731ae9f6b97052a0c98d9eb0a | []
| no_license | https://github.com/dacianmujdar/parking-django | a659f47761573b0eb0ca5e655a815794d398872a | b8e8741df734815ae854bc5706a75eba2b8f5e10 | refs/heads/master | 2021-09-17T19:53:46.115448 | 2018-07-04T19:03:54 | 2018-07-04T19:03:54 | 116,795,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from parking_project.account.serializers import AccountSerializer
from parking_project.parking_space.models import ParkingSpace
class ParkingSpaceSerializer(serializers.ModelSerializer):
allocated_to = AccountSerializer(read_only=True)
class Meta:
model = ParkingSpace
fields = ('id', 'code', 'is_occupied', 'times_extended', 'start_date', 'expiration_date', 'allocated_to')
| UTF-8 | Python | false | false | 442 | py | 45 | serializers.py | 40 | 0.757919 | 0.757919 | 0 | 12 | 35.833333 | 113 |
akenoq/homework-4 | 7,413,113,574,435 | e305788c02afd28dfdb0551d60ef1ea6117d3602 | 092a4ebf83943f2278fd2c4f447a8acac0960d2d | /tests/TestsMethods/CreateAlbumTestsMethods.py | 9fc36b0290f3c3625a0b238fe317eb7e98ded78d | []
| no_license | https://github.com/akenoq/homework-4 | 3be9270a54aad2b5d1df45da106600f04e49afe6 | d54239092cf4540a6d63ed3615c3c4c3dbde6dd9 | refs/heads/master | 2020-03-19T15:06:16.270474 | 2018-05-25T16:15:24 | 2018-05-25T16:15:24 | 131,852,660 | 0 | 0 | null | true | 2018-05-02T13:13:47 | 2018-05-02T13:13:47 | 2016-04-03T19:03:52 | 2017-12-30T00:22:33 | 0 | 0 | 0 | 0 | null | false | null | from tests.pages.primaryPhotoSection.AlbumPage import AlbumPage
from tests.pages.primaryPhotoSection.EditAlbumPage import EditAlbumPage
from tests.pages.primaryPhotoSection.PhotoSectionPage import PhotoSectionPage
class CreateAlbumTestsMethods:
def __init__(self, driver):
self.driver = driver
def create_album_from_photo_section_page(self, name):
photo_section_page = PhotoSectionPage(self.driver)
photo_album_list = photo_section_page.photo_albums_list_bar
photo_album_list.open_create_album_popup()
create_album_popup = photo_section_page.create_album_popup
create_album_popup.set_album_name(name)
create_album_popup.submit_album_creation()
def delete_album_from_photo_section(self, name):
photo_section_page = PhotoSectionPage(self.driver)
photo_album_list = photo_section_page.photo_albums_list_bar
photo_album_list.open_album_with_name(name)
album_page = AlbumPage(self.driver)
album_action_panel = album_page.action_panel
album_action_panel.edit_album()
edit_album_page = EditAlbumPage(self.driver)
edit_album_action_panel = edit_album_page.action_panel
edit_album_action_panel.open_delete_album_popup()
delete_album_popup = edit_album_page.delete_album_popup
delete_album_popup.press_delete_button_in_popup()
def close_create_album_popup(self):
photo_section_page = PhotoSectionPage(self.driver)
create_album_popup = photo_section_page.create_album_popup
create_album_popup.close_create_album_popup()
| UTF-8 | Python | false | false | 1,607 | py | 59 | CreateAlbumTestsMethods.py | 58 | 0.713752 | 0.713752 | 0 | 43 | 36.372093 | 77 |
tsutterley/gravity-toolkit | 18,176,301,617,695 | 9fe1e9b82a19791f9e72f20cb256c63e4b7f7485 | b11a35db38e93bf55456b21e96342870beec5d1f | /scripts/esa_costg_swarm_sync.py | 2f9a696e82ec26cb9824ce88696c22fa8df71d8d | [
"CC-BY-4.0",
"MIT"
]
| permissive | https://github.com/tsutterley/gravity-toolkit | f4ca6924b7037b0eb5b89c88b2f9efc9281420db | a673c2ba64c1b5d6fa69639b1d0f094d3999bc06 | refs/heads/main | 2023-08-22T14:26:00.137997 | 2023-08-16T19:49:45 | 2023-08-16T19:49:45 | 107,323,776 | 7 | 6 | MIT | false | 2023-09-07T18:09:44 | 2017-10-17T21:04:31 | 2023-07-29T03:00:07 | 2023-09-07T18:09:43 | 3,875 | 22 | 16 | 7 | Python | false | false | #!/usr/bin/env python
u"""
esa_costg_swarm_sync.py
Written by Tyler Sutterley (05/2023)
Syncs Swarm gravity field products from the ESA Swarm Science Server
https://earth.esa.int/eogateway/missions/swarm/data
https://www.esa.int/Applications/Observing_the_Earth/Swarm
CALLING SEQUENCE:
python esa_costg_swarm_sync.py
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory X: working data directory
-r X, --release X: Data release to sync
-t X, --timeout X: Timeout in seconds for blocking operations
-l, --log: output log of files downloaded
-L, --list: print files to be transferred, but do not execute transfer
-C, --clobber: Overwrite existing data in transfer
--checksum: compare hashes to check if overwriting existing data
-M X, --mode=X: Local permissions mode of the directories and files synced
PYTHON DEPENDENCIES:
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
UPDATE HISTORY:
Updated 05/2023: use pathlib to define and operate on paths
Updated 12/2022: single implicit import of gravity toolkit
Updated 11/2022: use f-strings for formatting verbose or ascii output
Updated 04/2022: use argparse descriptions within documentation
Updated 10/2021: using python logging for handling verbose output
Written 09/2021
"""
from __future__ import print_function
import sys
import re
import os
import io
import json
import time
import shutil
import logging
import pathlib
import argparse
import posixpath
import lxml.etree
import gravity_toolkit as gravtk
# PURPOSE: sync local Swarm files with ESA server
def esa_costg_swarm_sync(DIRECTORY, RELEASE=None, TIMEOUT=None, LOG=False,
LIST=False, CLOBBER=False, CHECKSUM=False, MODE=0o775):
# check if directory exists and recursively create if not
DIRECTORY = pathlib.Path(DIRECTORY).expanduser().absolute()
# local directory for exact data product
local_dir = DIRECTORY.joinpath('Swarm',RELEASE,'GSM')
local_dir.mkdir(mode=MODE, parents=True, exist_ok=True)
# create log file with list of synchronized files (or print to terminal)
if LOG:
# output to log file
# format: ESA_Swarm_sync_2002-04-01.log
today = time.strftime('%Y-%m-%d',time.localtime())
LOGFILE = DIRECTORY.joinpath(f'ESA_Swarm_sync_{today}.log')
logging.basicConfig(filename=LOGFILE, level=logging.INFO)
logging.info(f'ESA Swarm Sync Log ({today})')
else:
# standard output (terminal output)
logging.basicConfig(level=logging.INFO)
# Swarm Science Server url
# using the JSON api protocols to retrieve files
# static site is no longer available
HOST = 'https://swarm-diss.eo.esa.int'
# compile xml parsers for lxml
XMLparser = lxml.etree.XMLParser()
# create "opener" (OpenerDirector instance)
gravtk.utilities.build_opener(None, None,
authorization_header=False, urs=HOST)
# All calls to urllib2.urlopen will now use handler
# Make sure not to include the protocol in with the URL, or
# HTTPPasswordMgrWithDefaultRealm will be confused.
# compile regular expression operator for files
swarm_data = r'(SW)_(.*?)_(EGF_SHA_2)__(.*?)_(.*?)_(.*?)(\.gfc|\.ZIP)'
R1 = re.compile(swarm_data, re.VERBOSE)
# create combined list of filenames and last modified times
colnames = []
collastmod = []
# position, maximum number of files to list, flag to check if done
pos,maxfiles,prevmax = (0,500,500)
# iterate to get a compiled list of files
# will iterate until there are no more files to add to the lists
while (maxfiles == prevmax):
# set previous flag to maximum
prevmax = maxfiles
# open connection with Swarm science server at remote directory
# to list maxfiles number of files at position
parameters = gravtk.utilities.urlencode({'maxfiles':prevmax,
'pos':pos,'file':posixpath.join('swarm','Level2longterm','EGF')})
url=posixpath.join(HOST,f'?do=list&{parameters}')
request = gravtk.utilities.urllib2.Request(url=url)
response = gravtk.utilities.urllib2.urlopen(request,
timeout=TIMEOUT)
table = json.loads(response.read().decode())
# extend lists with new files
colnames.extend([t['name'] for t in table['results']])
collastmod.extend([t['mtime'] for t in table['results']])
# update maximum number of files
maxfiles = len(table['results'])
# update position
pos += maxfiles
# find lines of valid files
valid_lines = [i for i,f in enumerate(colnames) if R1.match(f)]
# write each file to an index
    index_file = local_dir.joinpath('index.txt')
fid = index_file.open(mode='w', encoding='utf8')
# for each data and header file
for i in valid_lines:
# remote and local versions of the file
parameters = gravtk.utilities.urlencode({'file':
posixpath.join('swarm','Level2longterm','EGF',colnames[i])})
remote_file = posixpath.join(HOST,
f'?do=download&{parameters}')
local_file = local_dir.joinpath(colnames[i])
# check that file is not in file system unless overwriting
http_pull_file(remote_file, collastmod[i], local_file,
TIMEOUT=TIMEOUT, LIST=LIST, CLOBBER=CLOBBER,
CHECKSUM=CHECKSUM, MODE=MODE)
# output Swarm filenames to index
print(colnames[i], file=fid)
# change permissions of index file
index_file.chmod(mode=MODE)
# close log file and set permissions level to MODE
if LOG:
LOGFILE.chmod(mode=MODE)
# PURPOSE: pull file from a remote host checking if file exists locally
# and if the remote file is newer than the local file
def http_pull_file(remote_file, remote_mtime, local_file, TIMEOUT=120,
LIST=False, CLOBBER=False, CHECKSUM=False, MODE=0o775):
# if file exists in file system: check if remote file is newer
TEST = False
OVERWRITE = ' (clobber)'
# check if local version of file exists
local_file = pathlib.Path(local_file).expanduser().absolute()
if CHECKSUM and local_file.exists():
# generate checksum hash for local file
# open the local_file in binary read mode
local_hash = gravtk.utilities.get_hash(local_file)
# Create and submit request.
# There are a wide range of exceptions that can be thrown here
# including HTTPError and URLError.
req = gravtk.utilities.urllib2.Request(remote_file)
resp = gravtk.utilities.urllib2.urlopen(req,timeout=TIMEOUT)
# copy remote file contents to bytesIO object
remote_buffer = io.BytesIO(resp.read())
remote_buffer.seek(0)
# generate checksum hash for remote file
remote_hash = gravtk.utilities.get_hash(remote_buffer)
# compare checksums
if (local_hash != remote_hash):
TEST = True
OVERWRITE = f' (checksums: {local_hash} {remote_hash})'
elif local_file.exists():
# check last modification time of local file
local_mtime = local_file.stat().st_mtime
# if remote file is newer: overwrite the local file
if (gravtk.utilities.even(remote_mtime) >
gravtk.utilities.even(local_mtime)):
TEST = True
OVERWRITE = ' (overwrite)'
else:
TEST = True
OVERWRITE = ' (new)'
# if file does not exist locally, is to be overwritten, or CLOBBER is set
if TEST or CLOBBER:
# Printing files transferred
logging.info(f'{remote_file} --> ')
logging.info(f'\t{str(local_file)}{OVERWRITE}\n')
# if executing copy command (not only printing the files)
if not LIST:
# chunked transfer encoding size
CHUNK = 16 * 1024
# copy bytes or transfer file
if CHECKSUM and local_file.exists():
# store bytes to file using chunked transfer encoding
remote_buffer.seek(0)
with local_file.open(mode='wb') as f:
shutil.copyfileobj(remote_buffer, f, CHUNK)
else:
# Create and submit request.
# There are a range of exceptions that can be thrown here
# including HTTPError and URLError.
request = gravtk.utilities.urllib2.Request(remote_file)
response = gravtk.utilities.urllib2.urlopen(request,
timeout=TIMEOUT)
# copy remote file contents to local file
with local_file.open(mode='wb') as f:
shutil.copyfileobj(response, f, CHUNK)
# keep remote modification time of file and local access time
os.utime(local_file, (local_file.stat().st_atime, remote_mtime))
local_file.chmod(mode=MODE)
# PURPOSE: create argument parser
def arguments():
parser = argparse.ArgumentParser(
description="""Syncs Swarm gravity field products from the
ESA Swarm Science Server
"""
)
# command line parameters
# working data directory
parser.add_argument('--directory','-D',
type=pathlib.Path, default=pathlib.Path.cwd(),
help='Working data directory')
# data release
parser.add_argument('--release','-r',
type=str, default='RL01', choices=['RL01'],
help='Data release to sync')
# connection timeout
parser.add_argument('--timeout','-t',
type=int, default=360,
help='Timeout in seconds for blocking operations')
# Output log file in form
# ESA_Swarm_sync_2002-04-01.log
parser.add_argument('--log','-l',
default=False, action='store_true',
help='Output log file')
# sync options
parser.add_argument('--list','-L',
default=False, action='store_true',
help='Only print files that could be transferred')
parser.add_argument('--clobber','-C',
default=False, action='store_true',
help='Overwrite existing data in transfer')
parser.add_argument('--checksum',
default=False, action='store_true',
help='Compare hashes to check for overwriting existing data')
# permissions mode of the directories and files synced (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permission mode of directories and files synced')
# return the parser
return parser
# This is the main part of the program that calls the individual functions
def main():
# Read the system arguments listed after the program
parser = arguments()
args = parser.parse_args()
# check internet connection before attempting to run program
HOST = 'https://swarm-diss.eo.esa.int'
if gravtk.utilities.check_connection(HOST):
esa_costg_swarm_sync(args.directory, RELEASE=args.release,
TIMEOUT=args.timeout, LOG=args.log, LIST=args.list,
CLOBBER=args.clobber, CHECKSUM=args.checksum, MODE=args.mode)
# run main program
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 11,353 | py | 181 | esa_costg_swarm_sync.py | 82 | 0.656478 | 0.646789 | 0 | 271 | 40.892989 | 78 |
Sruthisarav/My-computer-science-journey | 9,698,036,191,937 | fdaeedb21977212231b7cf4420a4a13f9d9d7e3c | 6d6a935c8281984fc26201a3d505151382cd36a9 | /Introduction to Computer Science and Programming Using Python/Problem Set 4/Problem-4.py | 20c267e6ee7ecf6715f5971cc5ed71caf51d0e15 | [
"Giftware"
]
| permissive | https://github.com/Sruthisarav/My-computer-science-journey | 56a7663ebc9c45c029c68da943969cdcbc2a88ac | 1af1d33ea7f49c92e9b5b82c9c92b7076a93825a | refs/heads/master | 2020-04-14T16:45:04.805208 | 2020-01-15T02:45:07 | 2020-01-15T02:45:07 | 163,960,263 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def calculateHandlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
number=0
for l in hand:
number+=hand[l]
return number
| UTF-8 | Python | false | false | 242 | py | 157 | Problem-4.py | 21 | 0.607438 | 0.603306 | 0 | 11 | 21 | 63 |
seba90/pracmln | 2,705,829,420,156 | cecb3bd5ea051f87aa717c8075fa87ab4926c796 | bd5f00fb60ebd3d95a03bb05dad0c05e0cc5e009 | /python3/rosmln/scripts/mln_client.py | b21abfbbb51400c1ab43e25cea9237d7103f734b | [
"BSD-2-Clause"
]
| permissive | https://github.com/seba90/pracmln | c3cd11999d6752e28959fb5ebd3bd7d4614d48c2 | 2af9e11d72f077834cf130343a2506344480fb07 | refs/heads/master | 2020-07-29T11:04:16.199057 | 2019-10-05T18:19:50 | 2019-10-05T18:19:50 | 209,773,157 | 0 | 0 | BSD-2-Clause | true | 2019-09-20T11:18:03 | 2019-09-20T11:18:03 | 2019-09-16T18:21:57 | 2019-05-17T11:59:14 | 4,433 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
import sys
import rospy
from pracmln.utils import locs
from rosmln.srv import *
from rosmln.msg import *
def mln_interface_client(query, config=None):
rospy.wait_for_service('mln_interface')
try:
mln_interface = rospy.ServiceProxy('mln_interface', MLNInterface)
resp1 = mln_interface(query, config)
return resp1.response
    except rospy.ServiceException as e:
print('Service call failed: %s'%e)
def print_results(results):
if not results.evidence:
print('ERROR: Something went wrong...')
else:
        print(results)
if __name__ == '__main__':
mlnFiles = '{}/test/models/smokers/wts.pybpll.smoking-train-smoking.mln'.format(locs.user_data)
db = '{}/test/models/smokers/smoking-test-smaller.db'.format(locs.user_data)
queries = 'Smokes'
output_filename = 'results.txt'
query = MLNQuery(queries, None)
config = MLNConfig(mlnFiles, db, 'GibbsSampler', output_filename, True, 'FirstOrderLogic', 'PRACGrammar')
print_results(mln_interface_client(query, config))
print('Without config parameters')
print_results(mln_interface_client(query))
print('Without evidence')
config.db=''
query = MLNQuery(queries, ['Cancer(Ann)', '!Cancer(Bob)', '!Friends(Ann,Bob)'])
print_results(mln_interface_client(query, config))
| UTF-8 | Python | false | false | 1,345 | py | 91 | mln_client.py | 63 | 0.679554 | 0.678067 | 0 | 42 | 30.97619 | 110 |
pramodjha/snippet-app | 19,387,482,407,378 | ca6d41abce87e0381f96222724ff9b5fece1166a | b0f2ab27db5df6cca736897dc83e70541674dbe9 | /env/Scripts/mysite/myapp/urls.py | c7259f93f4f7d0d770b59d8f660cf73936eb2296 | []
| no_license | https://github.com/pramodjha/snippet-app | ebd681e88e40fcaf06a097792ca8d07a446e08df | f8a42dab24a42af1bc3a4ab9c233c80b31f9ed48 | refs/heads/master | 2022-12-05T21:01:58.791082 | 2019-04-29T08:51:27 | 2019-04-29T08:51:27 | 181,320,386 | 0 | 1 | null | false | 2022-11-18T21:12:58 | 2019-04-14T14:23:13 | 2019-04-29T08:52:02 | 2019-04-29T08:52:00 | 35,913 | 0 | 1 | 1 | null | false | false | from django.conf.urls import include,url
from . import views
urlpatterns = [
url(r'^signin/', views.signin, name='signin'),
url(r'^signup/', views.signup, name='signup'),
url(r'^signout/', views.signout, name='signout'),
url(r'^home/', views.home, name='home'),
url(r'^about/', views.about, name='about'),
url(r'^learn/', views.learn, name='learn'),
url(r'^blog/', views.blog, name='blog'),
url(r'^snippet/', views.snippet, name='snippet'),
url(r'^contact/', views.contact_form, name='contact'),
url(r'^snippet_like/', views.snippet_like, name='snippetlike'),
url(r'^thankyou/(?P<ty_id>\d+)$', views.thankyou, name='thankyou'),
url(r'^home_add_form/', views.home_add_form, name='homeaddform'),
url(r'^home_edit_form/(?P<home_id>\d+)$', views.home_edit_form, name='homeeditform'),
url(r'^about_add_form/', views.about_add_form, name='aboutaddform'),
url(r'^about_edit_form/(?P<slug>[\w-]+)/$', views.about_edit_form, name='abouteditform'),
url(r'^snippet_add_form/', views.snippet_add_form, name='snippetaddform'),
url(r'^snippet_edit_form/(?P<slug>[\w-]+)/$', views.snippet_edit_form, name='snippeteditform'),
url(r'^learn_add_form/', views.learn_add_form, name='learnaddform'),
url(r'^learn_edit_form/(?P<slug>[\w-]+)/$', views.learn_edit_form, name='learneditform'),
url(r'^snippet_topics_data_view/(?P<slug>[\w-]+)/$', views.snippet_topics_view, name='snippetview'),
url(r'^snippet_topics_add_form/(?P<slug>[\w-]+)/$', views.snippet_topics_add_form, name='snippettopicsaddform'),
url(r'^snippet_topics_edit_form/(?P<slug>[\w-]+)/$', views.snippet_topics_edit_form, name='snippettopicseditform'),
url(r'^snippet_topics/(?P<slug>[\w-]+)/$', views.snippet_topics, name='snippettopics'),
url(r'^learn_topics_data_view/(?P<slug>[\w-]+)/$', views.learn_topics_view, name='learnview'),
url(r'^learn_topics_add_form/(?P<slug>[\w-]+)/$', views.learn_topics_add_form, name='learntopicsaddform'),
url(r'^learn_topics_edit_form/(?P<slug>[\w-]+)/$', views.learn_topics_edit_form, name='learntopicseditform'),
url(r'^learn_topics/(?P<slug>[\w-]+)/$', views.learn_topics, name='learntopics'),
url(r'^blog_topics_add_form/', views.blog_topics_add_form, name='blogtopicsaddform'),
url(r'^blog_topics_edit_form/(?P<slug>[\w-]+)/$', views.blog_topics_edit_form, name='blogtopicseditform'),
url(r'^blog_topics/(?P<slug>[\w-]+)/$', views.blog_topics, name='blogtopics'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
]
| UTF-8 | Python | true | false | 2,529 | py | 40 | urls.py | 19 | 0.659945 | 0.654409 | 0 | 38 | 65.552632 | 116 |
Visual-mov/Tex-lang | 2,929,167,717,563 | ce518b71f9998cd29c42d9816eb1153d2d36b27c | 854b056cb9ad1f04576ad6a729ea5e4a6dc10a30 | /src/parse/tokenizer.py | 9d3269df11e1d8fdf642daaee8e91bc6bf09beb4 | [
"MIT"
]
| permissive | https://github.com/Visual-mov/Tex-lang | 4cec90f94287f7ebb6449a372069192722010916 | a7df37239f83cd253d6be24d10bcb8f195d1e698 | refs/heads/master | 2020-09-23T16:06:18.582127 | 2020-07-23T00:58:53 | 2020-07-23T00:58:53 | 225,537,315 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
from exceptions import LexerException
# Token types
EOF = "EOF"
L_OP = "LOGICAL_OPERATOR"
L_PAREN = "LEFT_PARENTHESIS"
R_PAREN = "RIGHT_PARENTHESIS"
L_BRACKET = "LEFT_BRACKET"
R_BRACKET = "RIGHT_BRACKET"
B_BLCK = "BEGIN_BLOCK"
OP = "OPERATOR"
ASSIGN = "ASSIGNMENT"
NUM = "NUMBER"
STR = "STRING"
ID = "IDENTIFIER"
KEY = "KEYWORD"
# Token class
class Token:
def __init__(self, line, literal, type):
self.line = line
self.literal = literal
self.type = type
def __repr__(self):
return f'{self.line} | {self.type} : {repr(self.literal)}'
# Lexer class
# Takes the source text and converts it into smaller tokens (tokenization).
class Tokenizer:
def __init__(self, source, line=None):
self.index = 0
if len(source) > 0:
self.curtok = source[self.index]
self.source = source
self.line = 1 if line == None else line
self.tokens = []
self.keywords = [
"check", "celse",
"else", "while",
"true","false",
"print", "println",
"end", "continue",
"break"
]
def lex(self):
while self.index < len(self.source):
c = self.source[self.index]
cp = self.peek()
if c == '~':
self.scan("\n")
elif c == '\n':
self.line += 1
elif c == '-':
self.double_lexeme(c, cp, '>', OP, ASSIGN)
elif c == "\"":
self.tokens.append(Token(self.line, self.scan(c), STR))
self.advance()
elif c in ('>', '<', '!'):
self.double_lexeme(c, cp, '=', L_OP)
elif c == '=':
self.tokens.append(Token(self.line, c, L_OP))
            # only '&&' and '||' are two-character logical operators; the old
            # bare `c == self.peek()` test silently swallowed any doubled
            # character (e.g. the first '1' of "11")
            elif c in ('&', '|') and c == self.peek():
                self.tokens.append(Token(self.line, c + self.peek(), L_OP))
                self.advance()  # consume the second character of '&&' / '||'
elif c == ':':
self.tokens.append(Token(self.line, c, B_BLCK))
elif self.m("[][]", c):
self.tokens.append(Token(self.line, c, L_BRACKET if c == '[' else R_BRACKET))
elif str.isdecimal(c):
self.tokens.append(Token(self.line, self.get_digit(), NUM))
elif str.isalpha(c) or c in ('_'):
self.tokens.append(self.get_char_token())
elif self.m("[+/*^%]", c):
self.tokens.append(Token(self.line, c, OP))
elif self.m("[()]", c):
self.tokens.append(Token(self.line, c, L_PAREN if c == '(' else R_PAREN))
self.advance()
self.append_EOF()
return self.tokens
def m(self, pat, char):
return re.match(pat, char) != None
def peek(self):
index = self.index + 1
return self.source[index] if index < len(self.source) else None
def get_char_token(self):
result = self.scan_match("[a-zA-Z_0-9]")
return Token(self.line, result, ID) if result not in self.keywords else Token(self.line, result, KEY)
def get_digit(self):
val = self.scan_match("[0-9.]")
try: return float(val)
except ValueError:
raise LexerException(self.line, "Error lexing Float")
def double_lexeme(self, c, cp, expected_seek, type1, type2=L_OP):
if cp != expected_seek:
self.tokens.append(Token(self.line, c, type1))
else:
self.tokens.append(Token(self.line, c+cp, type2))
self.advance()
def scan(self, expected_c):
found = ""
for index in range(self.index, len(self.source)):
c = self.source[index]
found += c if c != expected_c else ""
if self.peek() == expected_c: break
elif c == EOF or index == len(self.source) - 1:
raise LexerException(self.line, f"Expected '{expected_c}' character")
else: self.advance()
return found
def scan_match(self, pat):
found = ""
while self.index < len(self.source) and re.match(pat, str(self.source[self.index])) != None:
found += self.source[self.index]
self.advance()
self.index-=1
return found
def advance(self):
self.index+=1
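
    # Example usage sketch (illustrative; assumes the local exceptions module
    # that defines LexerException is importable):
    #   tokens = Tokenizer("x -> 1 + 2:").lex()
    #   for t in tokens: print(t)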
def append_EOF(self):
self.tokens.append(Token(self.line, "", EOF))
def print_tokens(self):
for Token in self.tokens: print(Token) | UTF-8 | Python | false | false | 4,458 | py | 9 | tokenizer.py | 6 | 0.518843 | 0.515253 | 0 | 135 | 32.02963 | 109 |
analogpixel/roverGame | 11,476,152,618,667 | 2da504ff9cfd450c839a45e74da7f2ee8e3c811d | 323a6820446f65d8b989109896dd164f178b9ca4 | /spriteFunctions.py | 4369b88c71639b9ee796ffca3c142b626feac242 | []
| no_license | https://github.com/analogpixel/roverGame | a36d7cb9b3be4369b90a25be1b42e7a7485d3c47 | 9d5a6888c3da1bfec6fdc03e52071ebb153f37f9 | refs/heads/master | 2021-03-12T19:18:22.622176 | 2015-04-28T01:58:41 | 2015-04-28T01:58:41 | 28,829,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Function to handle sprites
"""
import os.path
import pygame
from localutil import *
import math
import time
def createSprite(name, system):
#def createSprite(name, pos=(0,0)):
"""
Create a sprite object
"""
system["sprite_" + name] = {
"name": name,
"r": 90,
"rdest": 90,
"state": "stopped",
"oldState": "stopped",
"moveable": True,
"imageCache": {},
"active": False,
"sound": False
}
return system
def updateState(system):
# http://www.pygame.org/docs/ref/mixer.html
if system['sprite_robot']['oldState'] != system['sprite_robot']['state']:
oldState = "%s_%s" % (system['sprite_robot']['name'], system['sprite_robot']['oldState'])
newState = "%s_%s" % (system['sprite_robot']['name'], system['sprite_robot']['state'])
print(oldState, newState)
# stop any existing sounds
if oldState in system['CONFIG']['C_SOUNDS']:
system['CONFIG']['C_SOUNDS'][oldState]['sound'].stop()
if newState in system['CONFIG']['C_SOUNDS']:
system['CONFIG']['C_SOUNDS'][newState]['sound'].play(\
loops=system['CONFIG']['C_SOUNDS'][newState]['loop'])
time.sleep(0.2)
system['sprite_robot']['oldState'] = system['sprite_robot']['state']
return system
def rot_center(image, angle):
"""rotate an image while keeping its center and size"""
orig_rect = image.get_rect()
rot_image = pygame.transform.rotate(image, angle)
#rot_rect = orig_rect.copy()
#rot_rect.center = rot_image.get_rect().center
#rot_image = rot_image.subsurface(rot_rect).copy()
return rot_image
def drawSprite(name, system):
"""
draw a sprite and return the updated data
"""
imageFile = "resources/%s_%s.png" % (name, system['sprite_' + name]['state'])
if not (imageFile in system['sprite_' + name ]['imageCache']):
system['sprite_' + name]['imageCache'][imageFile] = pygame.image.load(imageFile).convert_alpha()
fps = system['CONFIG']['C_FPS']
frameCount = system['sprite_' + name]['imageCache'][imageFile].get_width() / 100
currentFrame = int(translate( system['tic'] % fps , 0, fps, 0, frameCount))
s = pygame.Surface((100,100), pygame.SRCALPHA, 16).convert_alpha()
s.blit( system['sprite_' + name]['imageCache'][imageFile] , (0,0) , ( currentFrame * 100, 0, 100,100))
s = rot_center(s , int(system['sprite_' + name]['r']) * -1)
system['screen'].blit( s , (system['sprite_' + name]['x'] , system['sprite_' + name]['y'] ) )
return system
def moveSprite(system):
for c in ['x','y','r']:
if not( system['sprite_robot'][c] == system['sprite_robot'][c + "dest"]):
system['sprite_robot'][c] += 5 * \
int( (system['sprite_robot'][c + "dest"] - \
system['sprite_robot'][c]) / \
abs(system['sprite_robot'][c + "dest"] - \
system['sprite_robot'][c]) )
return system
def drawCommands(system):
if not ("commandq" in system):
return system
x = 0
system['controlImage'].fill( pygame.Color(0,0,0,0) )
for command in reversed(system['commandq']):
if command in system['commandLayout']:
system['controlImage'].blit( system['commandImage'] , (x, 0), (system['commandLayout'][command], 0, 100,100))
x = x + system['tileHeight']
return system
def drawMenu(system):
    # re-draw the part of the menu that gets clobbered by the moving ball,
    # otherwise leave the rest alone.
    system['screen'].blit( system['menuImage'], (50,100), (50, 100, 200,200 ))
    # draw the selection marker once, then a label for every mission
    pygame.draw.circle( system['screen'], (55,113,200), (560, 510 + 40 * system['currentMap']) ,10)
    for i in range(0, system['maxMap'] + 1):
        system = text("Mission " + str( i + 1) ,500, 500 + i * 40, system)
return system
def text(t,x,y, system):
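    # note: the x parameter is currently unused - the text is centred horizontally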
if pygame.font:
font = pygame.font.Font(None, 36)
text = font.render(t, 1, (10, 10, 10))
textpos = text.get_rect(centerx=system['screen'].get_width()/2 )
textpos[1] = y
system['screen'].blit(text, textpos)
return system
| UTF-8 | Python | false | false | 4,120 | py | 9 | spriteFunctions.py | 6 | 0.594175 | 0.568932 | 0 | 125 | 31.96 | 115 |
ghostcfl/tb_order_project | 12,455,405,187,177 | ebce1046d85bdd596c8877c9dd3fb1209dd85754 | 34bffeeafe6ff19f9e5b1438208b0c3777c4eee5 | /tools/request_headers.py | 4126edb70eb37468812d099e636ad915bcb60ccb | []
| no_license | https://github.com/ghostcfl/tb_order_project | 623f9488e6e8c082ee6a94f641a46c874b8ee3ec | 7e4e1b9db037d3057ba1c5303cc4fa20631d3a4e | refs/heads/master | 2021-05-24T07:43:13.155124 | 2020-05-22T03:52:57 | 2020-05-22T03:52:57 | 253,455,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 获得随机的user_agent的请求头
import requests
import shelve
import re
import random
import os
from pyquery import PyQuery
def get_user_agent():
with shelve.open(os.path.dirname(__file__) + "/user_agent/data") as db:
user_agents = db['user_agent']
return random.choice(user_agents)
def get_request_headers():
"""
获取随机的请求头
:return: headers
"""
headers = {
"User-Agent": get_user_agent(),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "item.taobao.com",
"Upgrade-Insecure-Requests": "1",
}
return headers
def set_user_agent():
"""
通过useragentstring.com爬取user_agents,并存储在本地文件中
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Referer': 'http://useragentstring.com/pages/useragentstring.php',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0',
}
params = (
('typ', 'Browser'),
)
response = requests.get('http://useragentstring.com/pages/useragentstring.php', headers=headers, params=params)
html = response.text
doc = PyQuery(html)
items = doc("ul li a").items()
list_browsers = [item.text() for item in items if
len(item.text()) > 80 and not re.search('pad|phone', item.text(), re.I)]
print(list_browsers)
with shelve.open(os.path.dirname(__file__) + "/user_agent/data") as db:
db['user_agent'] = list_browsers
| UTF-8 | Python | false | false | 1,993 | py | 29 | request_headers.py | 27 | 0.603832 | 0.574832 | 0 | 59 | 31.728814 | 115 |
JulyKikuAkita/PythonPrac | 15,788,299,788,559 | d3a872750b3dd61ad12bb9f3ec1d1838856af649 | cc578cec7c485e2c1060fd075ccc08eb18124345 | /cs15211/0Note_MergeTwoArraysRandomly.py | 90de315783f7507d811db3168253f97199bf911d | [
"Apache-2.0"
]
| permissive | https://github.com/JulyKikuAkita/PythonPrac | 18e36bfad934a6112f727b4906a5e4b784182354 | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | refs/heads/master | 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 | Apache-2.0 | false | 2019-02-07T06:15:30 | 2017-05-20T18:12:53 | 2019-01-15T23:42:09 | 2019-02-07T06:15:30 | 2,925 | 0 | 1 | 0 | Python | false | null | '''
# https://leetcode.com/forums/viewtopic.php?f=4&t=146&mc_cid=83951cfa08&mc_eid=cad0363a72
Given array a, and b.
Merge them into one array c, keep the order of elements in each array.
There are many possible results for c.
Your solution should randomly generate one of them with same probability.
For example, a = [1], b = [100, 200].
Then possible results may be [1, 100, 200], [100, 1, 200], or, [100, 200, 1].
And your algorithm should have 1/3 probability to generate each of them.
What is the complexity of your solution in time / extra space?
What if we treat [1,1] as duplicates?
Let me give another clearer example to illustrate this:
Given a = [1], b = [1, 2].
The possible results may be [1, 1, 2], [1, 1, 2], or [1, 2, 1].
Since two of the results are duplicate, should [1, 1, 2] appears with 1/2 probability instead of 1/3 probability?
If yes, you need to first determine how many duplicate results are there by merging all possibilities, which may be tricky.
Then the probability of choosing the first element from a vs the first element from b is
m/(m+n) and n/(m+n) respectively.
Keep repeating the previous step until one of the array run out of elements (which the probability becomes zero).
Runtime complexity is O(m + n) and no extra space (except for storing the output array c).
'''
from random import *
class Solution:
    # note: the original read `a` and `b` with raw_input() right here, at
    # class-definition time (so it ran on import); solve(a, b) takes the two
    # arrays as parameters instead
def C(self, m, n):
f = lambda x, y: y <= 0 and 1 or x * f(x - 1, y - 1)
return f(m, n) / f(n, n)
def solve(self, a, b):
l = []
m, n = len(a), len(b)
i, j = 0, 0
while m > 0 and n > 0:
L1, L2 = self.C(m - 1 + n, m - 1), self.C(m + n - 1, n - 1)
if randint(1, L1 + L2) > L1:
l.append(b[j])
j, n = j + 1, n - 1
else:
l.append(a[i])
i, m = i + 1, m - 1
if m > 0:
l += a[i:]
elif n > 0:
l += b[j:]
return l | UTF-8 | Python | false | false | 2,024 | py | 956 | 0Note_MergeTwoArraysRandomly.py | 955 | 0.594862 | 0.549407 | 0 | 59 | 33.322034 | 123 |
skorpk/LoadAccounts | 13,151,189,899,095 | 0953583e64ff2e06ae033f5e8db727e2d3010008 | 39b1186364c2fded9168e15ca3c52e1fcadbcf70 | /SQLProjects/xmlFileToSendToFFMOS.py | 8d419d562558e97276eddd3aae9a9ea01b447af9 | []
| no_license | https://github.com/skorpk/LoadAccounts | c53ac99922e9c0ba6873059ab24df0313e2ffeb0 | 1d473ecbc3d37b790b298442585dcfe1c1d2583c | refs/heads/master | 2021-07-21T15:10:32.600894 | 2017-11-01T13:41:31 | 2017-11-01T13:41:31 | 109,136,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 21 марта 2016 г.
1.Get data from SQL Server.
2.Create xml file like TKR3416*
3.The data will be sended into FFOMS.
@author: SKrainov
'''
sqlConnStr = ('DRIVER={SQL Server};Server=srv-cnt-db2;Database=AccountOMS;'+
'Trusted_Connection=YES')
import pyodbc #to connect to SQL Server
import os
import zipfile
def createZIP_DelXML(pathName,FileName):
zipf = zipfile.ZipFile(os.path.join(pathName, FileName+".oms"), 'w',zipfile.ZIP_DEFLATED)
zipf.write(pathName+'/'+FileName+".xml",FileName+'.xml')
zipf.close()
os.remove(pathName+'/'+FileName+".xml")
''' Insert information about file'''
def insertIntoSendingFile(fileNameXML,reportMonth,reportYear,code):
sqlConn = pyodbc.connect(sqlConnStr, autocommit = True)
curs = sqlConn.cursor()
print(fileNameXML)
curs.execute("EXEC dbo.usp_InsertSendingInformationAboutFile @nameFile =?, @reportMonth =?, @reportYear =?, @code = ?",fileNameXML,reportMonth,reportYear,code)
def getXML(nameFile,reportMonth,reportYear,code,pathName):
fileNameXML=fName+('000'+str(code))[-4:]
insertIntoSendingFile(nameFile,reportMonth,reportYear,code)
sqlConn = pyodbc.connect(sqlConnStr, autocommit = True)
curs = sqlConn.cursor()
curs.execute("EXEC dbo.usp_GetXMLSendingDataToFFOMS @nameFile=?,@reportMonth=?,@reportYear=?,@code=? ",nameFile,reportMonth,reportYear,code)
for r in curs:
file=open(os.path.join(pathName,fileNameXML+".xml"), mode='w')
file.write('<?xml version="1.0" encoding="Windows-1251"?>')
file.write(r.colXML)
file.close()
'''createZIP_DelXML(pathName,fileNameXML)'''
sqlConn = pyodbc.connect(sqlConnStr, autocommit = True)
dirName=r"f:\test\TKRFiles"
'''
Данные передаются пользователем
в reportMM ставим отчетный месяц
'''
reportMM=9
reportYYYY=2017
fName='TKR34'+(str(reportYYYY)[-2:])
'''
sqlConn = pyodbc.connect(sqlConnStr, autocommit = True)
curs = sqlConn.cursor()
curs.execute("SELECT ISNULL(MAX(NumberOfEndFile),0)+1 as Number FROM dbo.t_SendingFileToFFOMS WHERE ReportYear=?", reportYYYY)
for row in curs:
print(fName+('000'+str(row.Number))[-4:])
getXML(fName,reportMM,reportYYYY,row.Number,dirName)
print('File unloaded')
reportMM=reportMM+1
'''
| UTF-8 | Python | false | false | 2,473 | py | 2 | xmlFileToSendToFFMOS.py | 2 | 0.665012 | 0.648883 | 0 | 64 | 35.640625 | 163 |
brownplt/insta-model | 15,762,530,003,749 | c7e2528b798b951ea7d58367c8a0eb84f839135a | eb87c8b1ce8591d207643d3924b7939228f1a4fe | /conformance_suite/CheckedDict_lookup_dom_dyn_good.py | 3b512651543dfe0ca10274207ca3d62dbf237a5b | []
| no_license | https://github.com/brownplt/insta-model | 06543b43dde89913c219d476ced0f51a439add7b | 85e2c794ec4b1befa19ecb85f2c8d2509ec8cf42 | refs/heads/main | 2023-08-30T19:06:58.083150 | 2023-05-03T18:53:58 | 2023-05-10T22:29:18 | 387,500,638 | 5 | 0 | null | false | 2022-04-23T23:06:52 | 2021-07-19T14:53:09 | 2022-04-04T00:24:45 | 2022-04-23T23:06:52 | 1,594 | 1 | 0 | 0 | Racket | false | false | # CheckedDict_lookup_dom_dyn_good.py
# This should pass.
# This should terminate.
from __static__ import CheckedDict
def asDyn(x):
return x
x: CheckedDict[str, int] = CheckedDict[str, int]({"foo": 2})
asDyn(x)["foo"] | UTF-8 | Python | false | false | 223 | py | 378 | CheckedDict_lookup_dom_dyn_good.py | 338 | 0.686099 | 0.681614 | 0 | 11 | 19.363636 | 60 |
gitfish256/autoloadtest | 6,141,803,268,876 | e8445f9002b33249f5c572e32ec6930197f3f265 | 15d0d1347626efcc6041a5963d39d86e3c194ef2 | /perf_parser/sadf_parser.py | 14bf43154a6dcb979360dbd34e14a2e8ebdf9334 | [
"MIT"
]
| permissive | https://github.com/gitfish256/autoloadtest | a9747057e82847b4921a9a67e120ccb7c2da691d | 270d0b952200c597d0ef5a953a6088b6c529cb71 | refs/heads/master | 2021-06-17T14:56:28.543624 | 2017-06-08T05:32:30 | 2017-06-08T05:32:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas
import sys
import re
import subprocess
project_path = sys.path[0] + "\\.."
sys.path.append(project_path)
import data_processor.summary_controller as sc
import os
import fnmatch
class sadf_parser(object):
"""description of class"""
#timestamp_name = "T_Time"
timestamp_name = "T_Time"
header_list = [
"# hostname;interval;timestamp;CPU;%usr;%nice;%sys;%iowait;%steal;%irq;%soft;%guest;%gnice;%idle",
"# hostname;interval;timestamp;proc/s;cswch/s",
"# hostname;interval;timestamp;INTR;intr/s",
"# hostname;interval;timestamp;pswpin/s;pswpout/s",
"# hostname;interval;timestamp;pgpgin/s;pgpgout/s;fault/s;majflt/s;pgfree/s;pgscank/s;pgscand/s;pgsteal/s;%vmeff",
"# hostname;interval;timestamp;tps;rtps;wtps;bread/s;bwrtn/s",
"# hostname;interval;timestamp;frmpg/s;bufpg/s;campg/s",
"# hostname;interval;timestamp;kbmemfree;kbmemused;%memused;kbbuffers;kbcached;kbcommit;%commit;kbactive;kbinact;kbdirty",
"# hostname;interval;timestamp;kbswpfree;kbswpused;%swpused;kbswpcad;%swpcad",
"# hostname;interval;timestamp;kbhugfree;kbhugused;%hugused",
"# hostname;interval;timestamp;dentunusd;file-nr;inode-nr;pty-nr",
"# hostname;interval;timestamp;runq-sz;plist-sz;ldavg-1;ldavg-5;ldavg-15;blocked",
# all 0 line, header has 10 but has 15 columns number
# "# hostname;interval;timestamp;TTY;rcvin/s;txmtin/s;framerr/s;prtyerr/s;brk/s;ovrun/s",
"# hostname;interval;timestamp;DEV;tps;rd_sec/s;wr_sec/s;avgrq-sz;avgqu-sz;await;svctm;%util",
"# hostname;interval;timestamp;IFACE;rxpck/s;txpck/s;rxkB/s;txkB/s;rxcmp/s;txcmp/s;rxmcst/s;%ifutil",
"# hostname;interval;timestamp;IFACE;rxerr/s;txerr/s;coll/s;rxdrop/s;txdrop/s;txcarr/s;rxfram/s;rxfifo/s;txfifo/s",
"# hostname;interval;timestamp;call/s;retrans/s;read/s;write/s;access/s;getatt/s",
"# hostname;interval;timestamp;scall/s;badcall/s;packet/s;udp/s;tcp/s;hit/s;miss/s;sread/s;swrite/s;saccess/s;sgetatt/s",
"# hostname;interval;timestamp;totsck;tcpsck;udpsck;rawsck;ip-frag;tcp-tw",
"# hostname;interval;timestamp;irec/s;fwddgm/s;idel/s;orq/s;asmrq/s;asmok/s;fragok/s;fragcrt/s",
"# hostname;interval;timestamp;ihdrerr/s;iadrerr/s;iukwnpr/s;idisc/s;odisc/s;onort/s;asmf/s;fragf/s",
"# hostname;interval;timestamp;imsg/s;omsg/s;iech/s;iechr/s;oech/s;oechr/s;itm/s;itmr/s;otm/s;otmr/s;iadrmk/s;iadrmkr/s;oadrmk/s;oadrmkr/s",
"# hostname;interval;timestamp;ierr/s;oerr/s;idstunr/s;odstunr/s;itmex/s;otmex/s;iparmpb/s;oparmpb/s;isrcq/s;osrcq/s;iredir/s;oredir/s",
"# hostname;interval;timestamp;active/s;passive/s;iseg/s;oseg/s",
"# hostname;interval;timestamp;atmptf/s;estres/s;retrans/s;isegerr/s;orsts/s",
"# hostname;interval;timestamp;idgm/s;odgm/s;noport/s;idgmerr/s",
"# hostname;interval;timestamp;tcp6sck;udp6sck;raw6sck;ip6-frag",
"# hostname;interval;timestamp;irec6/s;fwddgm6/s;idel6/s;orq6/s;asmrq6/s;asmok6/s;imcpck6/s;omcpck6/s;fragok6/s;fragcr6/s",
"# hostname;interval;timestamp;ihdrer6/s;iadrer6/s;iukwnp6/s;i2big6/s;idisc6/s;odisc6/s;inort6/s;onort6/s;asmf6/s;fragf6/s;itrpck6/s",
"# hostname;interval;timestamp;imsg6/s;omsg6/s;iech6/s;iechr6/s;oechr6/s;igmbq6/s;igmbr6/s;ogmbr6/s;igmbrd6/s;ogmbrd6/s;irtsol6/s;ortsol6/s;irtad6/s;inbsol6/s;onbsol6/s;inbad6/s;onbad6/s",
"# hostname;interval;timestamp;ierr6/s;idtunr6/s;odtunr6/s;itmex6/s;otmex6/s;iprmpb6/s;oprmpb6/s;iredir6/s;oredir6/s;ipck2b6/s;opck2b6/s",
"# hostname;interval;timestamp;idgm6/s;odgm6/s;noport6/s;idgmer6/s"]
def __init__(self, sadf_file_in, target_interval, sig_name):
self.filter_sig_list = {'CPU':'-1', 'INTR':'-1', 'DEV':'dev8-0', 'IFACE':'eth0'}
self.parse_header_file = "perf_parser\\parse_list"
self.unused_column_list = ["# hostname", "interval"]
self.sadf_file_in = sadf_file_in
self.sadf_file_handle = open(sadf_file_in, 'r')
self.sadf_info = self.sadf_file_handle.readlines()
self.default_delimiter = ';'
self.target_interval = target_interval
self.sig_name = sig_name
def get_sub_df(self, header):
ret_list = []
iter_start = self.sadf_info.index(header + '\n')
for temp_line in self.sadf_info[iter_start + 1:]:
if temp_line.startswith('#'):
break
ret_list.append(temp_line.rstrip().split(';'))
ret_df = pandas.DataFrame(ret_list, columns = header.split(';'))
return ret_df
def get_cpu_df(self):
ret_df = self.get_sub_df(self.header_list[0])
return ret_df
def get_per_core_stats(self):
all_cpu_df = self.get_cpu_df()
core0_df = self.clear_unused_column(self.filter_sub_df(all_cpu_df, {'CPU':'0'}))
core1_df = self.clear_unused_column(self.filter_sub_df(all_cpu_df, {'CPU':'1'}))
merged_df = pandas.merge(core0_df, core1_df, how = 'inner', on = 'timestamp', suffixes = ['_0', '_1'])
return merged_df
def get_core_series_by_suffix(self, input_line, suffix, output_suffix):
temp_header = input_line.index.tolist()
ret_header = [i for i in temp_header if re.search(suffix, i)]
ret_series = input_line[ret_header]
core_header = [x.replace(suffix, output_suffix) for x in ret_header]
ret_series.index = core_header
return ret_series
def get_core_mapping_df(self, sadf_df, sum_core_header, sum_suffix):
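        # per row, keep the _0 or _1 column set whose CPU id matches sum_core_header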
df_out = pandas.DataFrame()
for index, temp_line in sadf_df.iterrows():
target_core = temp_line[sum_core_header]
if target_core == temp_line['CPU_0']:
ret_series = self.get_core_series_by_suffix(temp_line, '_0', sum_suffix)
elif target_core == temp_line['CPU_1']:
ret_series = self.get_core_series_by_suffix(temp_line, '_1', sum_suffix)
else:
print 'error in core mapping'
df_out = df_out.append(ret_series, ignore_index = True)
df_out.columns = ret_series.index
return df_out
def filter_sub_df(self, df_in, filter_sig_list):
filtered_flag = False
for key, val in filter_sig_list.iteritems():
if key in df_in.columns.tolist():
df_out = df_in[df_in[key] == val]
filtered_flag = True
if not filtered_flag:
df_out = df_in
return df_out
def clear_unused_column(self, df_in):
df_out = df_in
for temp_drop in self.unused_column_list:
df_out = df_out.drop(temp_drop, 1)
return df_out
def get_filtered_df_by_header(self, header):
temp_df = self.get_sub_df(header)
temp_df = self.filter_sub_df(temp_df, self.filter_sig_list)
temp_df = self.clear_unused_column(temp_df)
return temp_df
def scale_df_by_time(self, lower_time, upper_time, df_in):
temp_df = df_in[(df_in['timestamp'] > lower_time) & (df_in['timestamp'] <= upper_time)]
#list_out = [pandas.DataFrame.mean(column) for column in temp_df]
series_out = pandas.DataFrame.mean(temp_df)
series_out['timestamp'] = upper_time
return series_out
def covert_df_to_float(self, df_in):
df_out = df_in.apply(lambda f : pandas.to_numeric(f, errors='coerce'))
df_out = df_out.dropna(axis = 1)
return (df_out)
def scale_df_by_timestamp(self, timestamp_list, df_in, interval_in):
df_out = pandas.DataFrame()
temp_upper_time = timestamp_list[0]
temp_lower_time = timestamp_list[0] - interval_in
temp_out = self.scale_df_by_time(temp_lower_time, temp_upper_time, df_in)
df_out = df_out.append(temp_out, ignore_index = True)
index = 1
for temp_time in timestamp_list[index:]:
temp_lower_time = timestamp_list[index - 1]
temp_upper_time = timestamp_list[index]
temp_out = self.scale_df_by_time(temp_lower_time, temp_upper_time, df_in)
df_out = df_out.append(temp_out, ignore_index = True)
index += 1
return df_out
def merge_to_summary(self, sum_name_in, df_in):
temp_sum = pandas.read_csv(sum_name_in)
df_rename = df_in.rename(columns = {'timestamp':self.timestamp_name})
df_out = pandas.merge(temp_sum, df_rename, how = 'inner', on = self.timestamp_name)
return df_out
def if_over_threshold(self, core_df_in):
new_col = core_df_in.filter(regex = "%idle_\w")
temp_col_name = new_col.columns.tolist()
new_header = [re.sub("%idle", "ifover", temp_col_name[0])]
        # flag samples where busy time (100 - idle) reaches 95% of the 50% threshold
new_content = ((100 - new_col) >= 50 * 0.95)
new_content.columns = new_header
return new_content
def scale_by_CalTime(self, sum_name_in, org_interval, target_interval):
replace_string = self.sig_name + "_"
sum_name_out = sum_name_in.replace(replace_string + org_interval, replace_string + str(target_interval))
R_result = subprocess.Popen(["Rscript",
'CalTime.R',
sum_name_in,
str(target_interval),
sum_name_out],
stdout=subprocess.PIPE)#,
# cwd = self.Rscript_cwd)
out, err = R_result.communicate()
def remove_na(self, merge_df):
merge_df = pandas.DataFrame.dropna(merge_df)
merge_df = merge_df.reset_index(drop = True)
return merge_df
def main(folder_in):
os.chdir(folder_in)
sadf_name = ''
sum_name = ''
target_interval = str(5)
sig_name = "L1"
for file in os.listdir('.'):
if fnmatch.fnmatch(file, 'sadf*'):
sadf_name = file
if fnmatch.fnmatch(file, '*-' + sig_name + '_' + target_interval + '.csv'):
sum_name = file
print sadf_name
print sum_name
num_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-num_all_" + sig_name + "_" + target_interval)
thres_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-thres_all_" + sig_name + "_" + target_interval)
default_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-default_all_" + sig_name + "_" + target_interval)
all_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-opt23_all_" + sig_name + "_" + target_interval)
core_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-core_all_" + sig_name + "_" + target_interval)
map_name = sum_name.replace("-" + sig_name + "_" + target_interval, "-map_all_" + sig_name + "_" + target_interval)
temp_sadf = sadf_parser(sadf_name, target_interval, sig_name)
all_df = temp_sadf.get_filtered_df_by_header(temp_sadf.header_list[0])
for temp_header in temp_sadf.header_list[1:]:
temp_df = temp_sadf.get_filtered_df_by_header(temp_header)
all_df = pandas.merge(all_df, temp_df, how = 'inner', on = 'timestamp')
#get per core stats
per_core_df = temp_sadf.get_per_core_stats()
all_df = pandas.merge(all_df, per_core_df, how = 'inner', on = 'timestamp')
all_df = temp_sadf.covert_df_to_float(all_df)
#get sum and merge to sum based on sum's timestamp
test_sum = sc.summary_controller(sum_name)
temp_timelist = test_sum.get_timestamp()
scaled_df = temp_sadf.scale_df_by_timestamp(temp_timelist, all_df, int(target_interval))
merge_df = temp_sadf.merge_to_summary(sum_name, scaled_df)
#remove columns containing NaN (produced when align sar and pidstat logs)
merge_df = temp_sadf.remove_na(merge_df)
merge_df.to_csv(default_name, index = False)
temp_merge_df = merge_df
'''
#get core threshold
T_core_thres = temp_sadf.if_over_threshold(T_core_df)
M_core_thres = temp_sadf.if_over_threshold(M_core_df)
temp_merge_df = temp_merge_df.join(T_core_thres)
temp_merge_df = temp_merge_df.join(M_core_thres)
temp_merge_df.to_csv(thres_name, index = False)
'''
#test: merge pidstat with per core as training
per_core_df = temp_sadf.covert_df_to_float(per_core_df)
scaled_with_core = temp_sadf.scale_df_by_timestamp(temp_timelist, per_core_df, int(target_interval))
scaled_with_core = temp_sadf.merge_to_summary(sum_name, scaled_with_core)
scaled_with_core = temp_sadf.remove_na(scaled_with_core)
scaled_with_core.to_csv(core_name, index = False)
#get core mapping
T_core_df = temp_sadf.get_core_mapping_df(merge_df, 'T_CPU', '_T')
M_core_df = temp_sadf.get_core_mapping_df(merge_df, 'M_CPU', '_M')
#output df
merge_df = merge_df.join(T_core_df)
merge_df = merge_df.join(M_core_df)
merge_df = temp_sadf.remove_na(merge_df)
merge_df.to_csv(num_name, index = False)
#save one containing all columes to check
temp_merge_df = temp_merge_df.join(T_core_df)
temp_merge_df = temp_merge_df.join(M_core_df)
temp_merge_df = temp_sadf.remove_na(temp_merge_df)
temp_merge_df.to_csv(all_name, index = False)
temp_sadf.scale_by_CalTime(num_name, target_interval, 60)
temp_sadf.scale_by_CalTime(default_name, target_interval, 60)
temp_sadf.scale_by_CalTime(core_name, target_interval, 60)
if __name__ == '__main__':
main(sys.argv[1])
| UTF-8 | Python | false | false | 13,555 | py | 30 | sadf_parser.py | 26 | 0.622206 | 0.612984 | 0 | 324 | 40.830247 | 196 |
volphie/DeepLearning | 17,128,329,593,232 | bfd7da50789f29df4ca29a39b2282f2fa2fef62b | 32b531bb963cf00425b173011282dc5b7308fcdd | /recognize_digit_MNIST.py | a7cf8597f75d873158d257988095f54ed0ff332d | []
| no_license | https://github.com/volphie/DeepLearning | 18831d3c91478557d166187c7599806bd4a4b81b | fc9e808445e7f8bd5a45ec6527f1395669586daf | refs/heads/master | 2020-03-23T06:56:20.894272 | 2018-07-17T07:05:48 | 2018-07-17T07:05:48 | 141,238,306 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
# boilerplate that is always used when loading the MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # relative to the current directory
import tensorflow as tf
import time
t1 = time.time()
# Neural Network Core Model... Start here
num_steps = 5000
batch_size = 128
display_step = 100
num_input = 784 # each 28x28 MNIST image flattened into a 784-vector
num_classes = 10
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
n_hidden_4 = 256
learning_rate = 0.01
X = tf.placeholder(tf.float32, [None, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
weights = {
'h1' : tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3' : tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
'h4' : tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_hidden_4, num_classes]))
}
biases = {
'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
'b3' : tf.Variable(tf.random_normal([n_hidden_3])),
'b4' : tf.Variable(tf.random_normal([n_hidden_4])),
'out': tf.Variable(tf.random_normal([num_classes])),
}
# Multi-Layer Perceptron
def mlp(x):
L1 = tf.nn.relu(tf.matmul(x, weights['h1']) + biases['b1'])
L2 = tf.nn.relu(tf.matmul(L1, weights['h2']) + biases['b2'])
L3 = tf.nn.relu(tf.matmul(L2, weights['h3']) + biases['b3'])
L4 = tf.nn.relu(tf.matmul(L3, weights['h4']) + biases['b4'])
Lout = tf.matmul(L4, weights['out'] ) + biases['out']
return Lout
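
# assemble the graph: 784 inputs -> four 256-unit ReLU hidden layers -> 10 logits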
logits = mlp(X)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Neural Network Core Model... End here
# for comparison
prediction = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(prediction,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Training
for step in range(1, num_steps+1) :
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={X:batch_x, Y:batch_y})
if step % display_step == 0:
loss, acc = sess.run([cost, accuracy], feed_dict={X:batch_x, Y:batch_y})
print("step " + str(step) + ", Minibatch loss = " + "{:.4f}".format(loss) + ", Training Accuracy = "+"{:.4f}".format(acc*100) +"%")
print("Optimization Finished!!")
t2 = time.time()
# Test
print("Testing Accuracy : {:1f}%".format(sess.run(accuracy, feed_dict={X:mnist.test.images, Y:mnist.test.labels})*100))
print("Learning Time: "+str(t2-t1)+" seconds")
| UTF-8 | Python | false | false | 2,896 | py | 11 | recognize_digit_MNIST.py | 11 | 0.634642 | 0.601332 | 0 | 82 | 33.756098 | 143 |
gregadoff/rosalind | 5,299,989,656,859 | 4fbd0d88a75eaa1dce1e42ac25ee316c45ebf6c9 | a06c6c27ec89fc4ad9e77b063e2764a035861a4a | /rosalind/solutions/aspc.py | ef0cfa5d78562dd9685bbf5b3f35394559b139e5 | [
"MIT"
]
| permissive | https://github.com/gregadoff/rosalind | ea89a66037a4481af9c2ebf44aceabdb7e2b12df | 620d97036e6899a2c4a68cfb411bc000895da2c4 | refs/heads/master | 2020-03-17T02:36:16.390900 | 2018-05-23T01:41:48 | 2018-05-23T01:41:48 | 133,196,916 | 0 | 0 | MIT | false | 2019-10-21T16:05:46 | 2018-05-13T01:41:13 | 2018-05-23T01:42:18 | 2019-10-21T16:05:45 | 731 | 0 | 0 | 1 | Python | false | false | from decimal import Decimal, getcontext
from functools import reduce
from operator import add, mul
from rosalind.core.rosalindsolution import RosalindSolution
from rosalind.solutions import problem_order
class RosalindImplementation(RosalindSolution):
def __init__(self):
super().__init__()
self.problem_name = self.get_pname(__file__)
self.problem_number = problem_order.index(self.problem_name)
def solve(self, instream, outstream):
# can speed this up with modular arith, but it make the factorial
# method more difficult to implement
getcontext().prec = 1000 # make this large enough for input
n, m = map(Decimal, map(int, instream.read().split()))
def decrange(a, b):
while a < b:
yield a
a = a + Decimal(1)
def fact(a):
if a <= 1:
return 1
return reduce(mul, decrange(1, a + 1))
def binom(n, k):
return fact(n) / (fact(k) * fact(n - k))
res = reduce(add, (binom(n, i) for i in decrange(m, n + Decimal(1))))
print(int(res % Decimal(1000000)), file=outstream)
| UTF-8 | Python | false | false | 1,173 | py | 209 | aspc.py | 61 | 0.599318 | 0.584825 | 0 | 35 | 32.514286 | 77 |
gnorgol/Python_Exercice | 11,467,562,713,085 | 6b73135ee035e6c40a41ce59c3686c9784c138c7 | 3dffcaf0a99c08b3bc6dc273c32e3510497473ba | /Exercice 29.py | 9471f7a9d284801571709caa04e6750c6b3998cc | []
| no_license | https://github.com/gnorgol/Python_Exercice | 400674f2947467f2edbb86794e50c653bdb8c69d | 73494395dd5110d9f8c1cfdc59df45ab0fb9e0fb | refs/heads/main | 2023-01-14T09:31:31.411617 | 2020-11-16T15:17:07 | 2020-11-16T15:17:07 | 311,283,645 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s = input("Saisir une phrase: ")
s = s.split()
resultat = []
for each in s :
if each not in resultat:
resultat.append(each)
resultat = " ".join(resultat)
print(resultat)
| UTF-8 | Python | false | false | 190 | py | 54 | Exercice 29.py | 54 | 0.610526 | 0.610526 | 0 | 8 | 21.75 | 32 |
subahdeva/IS362_Week7 | 1,005,022,391,206 | db9c539e145ce2438b38dfedaa0248dc34e1aa9c | 69855780e7b885b97acd95b3be7bfadcae03d062 | /IS362_Week7.py | a84a83e4630bdfb9283729080e88b43cd49436e0 | []
| no_license | https://github.com/subahdeva/IS362_Week7 | 865ae3cf1970b5c2f1b49c0d5fc660907b53b081 | 91b2ac6421e547e96145141fb9aad006b2b2ce31 | refs/heads/main | 2023-04-24T00:36:52.086241 | 2021-05-18T03:35:44 | 2021-05-18T03:35:44 | 368,388,382 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[352]:
import numpy as np
import pandas as pd
import re
df = pd.read_csv('popular_movies.csv', na_filter= False, skipinitialspace=True)
# Convert values into int
df['The Dig'] = pd.to_numeric(df['The Dig'])
df['Nomadland'] = pd.to_numeric(df['Nomadland'])
df['Minari'] = pd.to_numeric(df['Minari'])
df['The Father'] = pd.to_numeric(df['The Father'])
df['Nobody'] = pd.to_numeric(df['Nobody'])
df.to_csv('popular_movies.csv', index=False, mode='w')
df = pd.read_csv('popular_movies.csv')
# list out movie columns
cols = ['The Dig' , 'Nomadland', 'Minari', 'The Father', 'Nobody']
# Calculate the user rating averages
user_0_avg = df.iloc[:1].sum(axis=1).div(len(cols)).to_string(header=None,index=False)
user_1_avg = df.iloc[1:2].sum(axis=1).div(len(cols)).to_string(header=None,index=False)
user_2_avg = df.iloc[2:3].sum(axis=1).div(len(cols)).to_string(header=None,index=False)
user_3_avg = df.iloc[3:4].sum(axis=1).div(len(cols)).to_string(header=None,index=False)
user_4_avg = df.iloc[4:5].sum(axis=1).div(len(cols)).to_string(header=None,index=False)
# Calculate the movie rating averages
movie_0_avg = df.iloc[0:5, 1].sum(axis=0) / (len(cols))
movie_1_avg = df.iloc[0:5, 2].sum(axis=0) / (len(cols))
movie_2_avg = df.iloc[0:5, 3].sum(axis=0) / (len(cols))
movie_3_avg = df.iloc[0:5, 4].sum(axis=0) / (len(cols))
movie_4_avg = df.iloc[0:5, 5].sum(axis=0) / (len(cols))
#df.info()
#Append new columns with total averages for user and movie
df['Average Rating for User'] = [user_0_avg,user_1_avg,user_2_avg,user_3_avg,user_4_avg,'']
df['Average Rating for Movie'] = [movie_0_avg,movie_1_avg,movie_2_avg, movie_3_avg, movie_4_avg,'']
df.head()
# what might be advantages and disadvantages of using normalized ratings instead of the actual ratings:
# The advantages are you can see what the user normally rates for what genre of movies, using a user's pattern of rating
# can predict what movies they will watch in the future. This can be useful for streaming websites or services to track.
# The disadvantages of using normalized ratings is you can't account for the outliers that you get in actual or raw data.
# In[ ]:
# In[ ]:
# In[ ]:
| UTF-8 | Python | false | false | 2,222 | py | 3 | IS362_Week7.py | 1 | 0.686319 | 0.660216 | 0 | 70 | 30.685714 | 121 |
increscent/tmp | 5,763,846,119,875 | 06988e2dda2d15c2c83b53c18990e7c184082e03 | 38cdd2ca9e4996dbd8dff4c9df7d80f4966611e0 | /calc.py | 279ead1236b43b8e039da016550b2a5c1f93af6e | []
| no_license | https://github.com/increscent/tmp | 80f0fe61c076e598885d0d496a0c9b61e12d46b8 | 9f81fa966a9ccc28c1ed9b417850490e2c21a4c6 | refs/heads/master | 2020-05-05T01:08:29.139358 | 2019-06-11T02:34:09 | 2019-06-11T02:34:09 | 179,593,914 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sys
argv = sys.argv
q = np.array([17,16])
p1 = np.array([int(argv[1]), int(argv[2])])
p2 = np.array([int(argv[3]), int(argv[4])])
print(q, p1, p2)
print(np.dot(q-p1, p2-p1))
| UTF-8 | Python | false | false | 203 | py | 30 | calc.py | 6 | 0.610837 | 0.536946 | 0 | 11 | 17.454545 | 43 |
ababjac/rare-codon-clustering | 16,552,803,986,037 | 894f88ed9636c431ca52609507dc822faeb0657a | 0539b8e54cc9aaafd9251aaf6950a5dec7dd7b74 | /Code/Scripts/calc_avg_clusters.py | fbfa89448f56f57db13ebf82ce55f939535af457 | []
| no_license | https://github.com/ababjac/rare-codon-clustering | c27fa4f6a106004e603b2552e4f080402c82eba3 | b1acf762d9ab701294b8123c0717a2fc8fc4c55a | refs/heads/main | 2023-07-11T14:36:09.689646 | 2021-08-09T17:58:22 | 2021-08-09T17:58:22 | 389,656,045 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import pandas as pd
def calc_avg_per_cluster(lines, df, colname):
d = {}
total_avg = sum(df['MM_avg']) / len(df)
for line in lines:
if line.__contains__('Cluster'):
text = line.split(' ')
key = text[0]+text[1]
continue
list = line.split(', ')
list.remove('\n')
#print(list)
s = 0
for elem in list:
#print(elem, df[df[colname] == elem]['MM_avg'].values[0])
s += df[df[colname] == elem]['MM_avg'].values[0]
cluster_avg = s / len(list)
#print(cluster_avg)
d[key] = cluster_avg
d['Total'] = total_avg
return d
#------------------------------------------------------------------------------------------------------#
# DF_ecoli = pd.read_csv('../Files/MM/minMax_full.csv')
# #print(DF_ecoli[DF_ecoli['Gene'] == 'thrA']['MM_avg'].values[0])
# file = open('../Files/Clusters/Hierarchical/Centroid/Full/hclust_HD_ecoli_t0.28.txt', 'r')
# dct = calc_avg_per_cluster(file.readlines(), DF_ecoli, 'Gene')
# print(dct)
FOLDER = 'Full'
#FOLDER = 'Omit10'
#FOLDER = 'Omit25'
LINKAGE = 'Centroid/'
#LINKAGE = 'Single/'
DIRECTORY = '../Files/Clusters/Hierarchical/'+LINKAGE+FOLDER+'/'
#SIG = '0.05'
DF_ecoli = pd.read_csv('../Files/MM/minMax_full.csv')
DF_yeast = pd.read_csv('../Files/MM/minMax_full_yeast.csv')
with os.scandir(DIRECTORY) as d:
for entry in d:
if entry.name.endswith('.txt') and entry.is_file():
input_path = os.path.join(DIRECTORY, entry.name)
file = open(input_path, 'r')
lines = file.readlines()
if entry.name.__contains__('ecoli'):
df = DF_ecoli
colname = 'Gene'
else:
df = DF_yeast
colname = 'locus_tag'
dict = calc_avg_per_cluster(lines, df, colname)
print(entry.name)
print(dict)
print()
| UTF-8 | Python | false | false | 1,955 | py | 416 | calc_avg_clusters.py | 19 | 0.514066 | 0.505882 | 0 | 73 | 25.780822 | 104 |
ITBlackwood/BaronAI | 7,851,200,224,571 | 8094f0138763e9b99eb85a2e3820bcfd625a8195 | 1b624c94150602e3165c39c55e6ebd0ef6e918f0 | /MoMMI/modules.py | 0104f86de83595be0c7137b94491e5360bf4fede | [
"MIT"
]
| permissive | https://github.com/ITBlackwood/BaronAI | 037e81882ee41b38d93212f7c9429647e7ea3fdd | 88519db71bf7c84fbaa418af46682fff2276d6ef | refs/heads/master | 2020-06-13T11:03:43.887834 | 2016-08-09T07:37:22 | 2016-08-09T07:37:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .client import client
from .config import config
import asyncio
import logging
import os
import os.path
import importlib
logger = logging.getLogger(__name__)
modules = []
async def load_modules():
count = 0
directory = config.get("moduledir", os.path.join("MoMMI", "Modules"))
for file in os.listdir(directory):
path = os.path.join(directory, file)
if os.path.isfile(path) and file[-3:] == ".py" and file != "__init__.py":
logger.info("Loading module %s", path)
try:
mod = importlib.import_module("MoMMI.Modules.%s" % (file[:-3]))
if hasattr(mod, "load"):
await mod.load()
modules.append(mod)
count += 1
except:
logger.exception("Error while loading module %s", path)
return count
async def reload_modules():
count = 0
errored = 0
new = 0
filenames = []
for module in modules:
if hasattr(module, "unload"):
try:
await module.unload()
except:
logger.exception("Exception while unloading a module.")
try:
filenames.append(module.__file__)
importlib.reload(module)
if hasattr(module, "load"):
await module.load()
count += 1
except:
logger.exception("Exception while trying to reload a module.")
errored += 1
directory = os.path.join("MoMMI", "Modules")
for file in os.listdir(directory):
path = os.path.join(directory, file)
if os.path.isfile(path) and file[-3:] == ".py" and file != "__init__.py":
if os.path.abspath(path) in filenames:
continue
logger.info("Loading NEW module %s", path)
try:
modules.append(importlib.import_module("MoMMI.Modules.%s" % (file[:-3])))
new += 1
except:
logger.exception("Error while loading NEW module %s", path)
return count, errored, new
| UTF-8 | Python | false | false | 2,107 | py | 13 | modules.py | 11 | 0.535358 | 0.529663 | 0 | 74 | 27.418919 | 89 |
davidgabriel94/my-first-blog | 19,129,784,336,910 | e50b3934dd41f6edb0fb9e7618482ff4c7f614ab | 413fdf9151fccda6ef260015f19fc858c70d69ee | /agenda/apps/alumnos/urls.py | 82a769a17c635cd3104417cdde3e05abd74f9f60 | []
| no_license | https://github.com/davidgabriel94/my-first-blog | aaada7f112347c98301e37dc7319043d165c3994 | 7fc12561497513adcb97f3b202632a941c4940d0 | refs/heads/master | 2021-05-15T09:22:30.319333 | 2017-10-23T20:55:43 | 2017-10-23T20:55:43 | 107,487,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url,include
from apps.alumnos.views import alumno_list
urlpatterns = [
url(r'^$',alumno_list),
]
| UTF-8 | Python | false | false | 131 | py | 8 | urls.py | 8 | 0.725191 | 0.725191 | 0 | 6 | 20.833333 | 42 |
openstack/neutron-fwaas | 7,146,825,607,359 | e7d4f36851e8d80b7249006ea05d89657be03159 | 45fc764c090296de7f19358a025231c54d4d3eff | /neutron_fwaas/tests/unit/privileged/test_utils.py | 4b3a4dad42e377ce9dcd9e8e801d4c4278fd07ca | [
"Apache-2.0"
]
| permissive | https://github.com/openstack/neutron-fwaas | 71e580dc7cfebee7fa1806b2f53f33a3c1726e21 | 193d1e27661438e11a44b379367d56affba79b21 | refs/heads/master | 2023-09-01T05:01:29.629535 | 2023-06-27T03:14:25 | 2023-06-27T03:15:24 | 27,489,947 | 78 | 49 | Apache-2.0 | false | 2022-03-15T02:26:21 | 2014-12-03T14:07:11 | 2022-03-01T11:33:52 | 2022-03-14T07:07:03 | 95,817 | 59 | 43 | 0 | Python | false | false | # Copyright (c) 2017 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import testtools
from neutron_fwaas.privileged import utils
from neutron_fwaas.tests import base
class InNamespaceTest(base.BaseTestCase):
ORG_NETNS_FD = 124
NEW_NETNS_FD = 421
NEW_NETNS = 'newns'
def setUp(self):
super(InNamespaceTest, self).setUp()
# NOTE(cby): we should unmock os.open/close as early as possible
# because there are used in cleanups
open_patch = mock.patch('os.open', return_value=self.ORG_NETNS_FD)
self.open_mock = open_patch.start()
self.addCleanup(open_patch.stop)
close_patch = mock.patch('os.close')
self.close_mock = close_patch.start()
self.addCleanup(close_patch.stop)
self.setns_mock = mock.patch(
'pyroute2.netns.setns').start()
def test_in_namespace(self):
with utils.in_namespace(self.NEW_NETNS):
self.setns_mock.assert_called_once_with(self.NEW_NETNS)
setns_calls = [mock.call(self.NEW_NETNS), mock.call(self.ORG_NETNS_FD)]
self.setns_mock.assert_has_calls(setns_calls)
def test_in_no_namespace(self):
for namespace in ('', None):
with utils.in_namespace(namespace):
pass
self.setns_mock.assert_not_called()
self.close_mock.assert_not_called()
def test_in_namespace_failed(self):
with testtools.ExpectedException(ValueError):
with utils.in_namespace(self.NEW_NETNS):
self.setns_mock.assert_called_once_with(self.NEW_NETNS)
raise ValueError
setns_calls = [mock.call(self.NEW_NETNS), mock.call(self.ORG_NETNS_FD)]
self.setns_mock.assert_has_calls(setns_calls)
def test_in_namespace_enter_failed(self):
self.setns_mock.side_effect = ValueError
with testtools.ExpectedException(ValueError):
with utils.in_namespace(self.NEW_NETNS):
self.fail('It should fail before we reach this code')
self.setns_mock.assert_called_once_with(self.NEW_NETNS)
def test_in_namespace_exit_failed(self):
self.setns_mock.side_effect = [self.NEW_NETNS_FD, ValueError]
with testtools.ExpectedException(utils.BackInNamespaceExit):
with utils.in_namespace(self.NEW_NETNS):
pass
| UTF-8 | Python | false | false | 2,943 | py | 121 | test_utils.py | 73 | 0.663609 | 0.658512 | 0 | 80 | 35.7875 | 79 |
lmaag182/cortical_one | 704,374,679,126 | 13865620ab932220469f82cdd79fdf5cafa387c0 | 4031073d89e3fb690223259c79b71f59a3d2fb65 | /nupic/abstract_sensor.py | e0e0c31dbe7cffdded42de82381b5d9ee0b29108 | []
| no_license | https://github.com/lmaag182/cortical_one | 0484b81f576b0ffd11871239e6b40776a519e29b | 95167afec176c90abf8a35f220eac5cec6e1c500 | refs/heads/master | 2021-01-10T08:14:51.253956 | 2016-03-23T20:15:47 | 2016-03-23T20:15:47 | 49,380,888 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sensor import Sensor
import json
from nupic.frameworks.opf.modelfactory import ModelFactory
from tools import getModelParamsFromFileNG
import os
import time
import psutil
import copy
from abstract_sensor_listener import AbstractSensorListener
from swarm_config import SwarmConfig
import stomp
import threading
class AbstractSensor(Sensor, threading.Thread):
def __init__(self,name,admin_in,admin_out,sensor_spec, sensors_dir,sensor_in,store,swarm):
threading.Thread.__init__(self)
#self.config = config
self.sensor_in = sensor_in
self.store = store
self.swarm = swarm
self.name = name
self.brain_available = False
threading.Thread.__init__(self)
Sensor. __init__(self,name=name,admin_in=admin_in, admin_out=admin_out,sensor_spec=sensor_spec, sensors_dir=sensors_dir)
swarm_config_path = sensors_dir + sensor_in +'/stores/' + store + '/swarms/' + swarm +'/'
#store_path = sensors_dir + sensor_in +'/stores/' + store + '/out.csv'
#model = ModelFactory.loadFromCheckpoint('/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save')
print swarm_config_path
#load original swarm config file
with open(swarm_config_path + 'swarm_config.json')as json_file:
self.swarm_config = json.load(json_file)
print(self.swarm_config)
self.swarm_config_ng = SwarmConfig(self.swarm_config)
print self.swarm_config_ng.get_predicted_field()
#if there is a 'brain', then tae the existing brain
self.possible_brain_path = str(swarm_config_path + 'model_save')
if os.path.exists(self.possible_brain_path):
possible_brain_2 = '/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save'
print "load existing brain..."
print self.possible_brain_path
#model = ModelFactory.loadFromCheckpoint(possible_brain_2)
model = ModelFactory.loadFromCheckpoint(self.possible_brain_path)
#use this case to add the availabilty of a 'brain' (???!!!) to your annuncement
else:
#laod model configuration
model = ModelFactory.create(getModelParamsFromFileNG(swarm_config_path))
#configure prediction
model.enableInference({"predictedField": self.swarm_config_ng.get_predicted_field()})
self.connection_sensor_in = stomp.Connection()
self.connection_sensor_in.set_listener(name=self.name, lstnr=AbstractSensorListener(self.name,topic = '/topic/' +self.sensor_in,config=self.swarm_config_ng,model=model))
self.connection_sensor_in.start()
self.connection_sensor_in.connect(self.user, self.password, wait=True)
#self.connection_sensor_in.connect('admin', 'password', wait=True)
self.abstract_listener = self.connection_sensor_in.get_listener(name=self.name)
self.connection_sensor_in.subscribe(destination='/topic/' +self.sensor_in, id=2, ack='auto')
self.values = []
self.self_announcement()
def run(self):
while True:
self.announcement_check()
values = self.abstract_listener.check_input()
self.send_payload(values)
self.check_recording(values)
time.sleep(0.5)
def self_announcement(self):
stores = []
if not os.path.exists(self.sensor_data_dir):
os.makedirs(self.sensor_data_dir)
store_dirs = os.listdir(self.sensor_data_dir)
for store_dir_name in store_dirs:
store = {}
store['name']= store_dir_name
swarms_dir = self.sensor_data_dir + '/' + store_dir_name + '/swarms/'
if os.path.exists(swarms_dir):
swarm_dir_names = os.listdir(self.sensor_data_dir + '/' + store_dir_name + '/swarms/' )
print store_dir_name
swarms = []
for swarm_dir_name in swarm_dir_names:
if os.path.exists(self.sensor_data_dir + '/' + store_dir_name + '/swarms/'+swarm_dir_name + '/model_save/'):
print "jajajajajaaaaaaaaaa..........................................."
#use this case to add the availabilty of a 'brain' (???!!!) to your annuncement
swarms.append(swarm_dir_name)
print '\t%s' % swarm_dir_name
store['swarms']= swarms
stores.append(store)
announce = {'message': {'type': "sensor_announcement",
'sensor': {'name': self.name, 'sensor_items': self.swarm_config_ng.get_column_names(self.swarm_config_ng.get_field_names()),
'stores': store_dirs,
'store_ng': stores
}
}
}
self.connection.send(body=json.dumps(announce), destination=self.admin_in)
| UTF-8 | Python | false | false | 5,050 | py | 19 | abstract_sensor.py | 16 | 0.599604 | 0.597822 | 0 | 115 | 42.895652 | 177 |
Wanger-SJTU/leetcode-solutions | 14,345,190,790,884 | 22dff8d8472692e7e2947bdf275d7a0590bc49a9 | 68d9fffda9c1ee0f4819371067adfd4985332319 | /python/47.全排列-ii.py | c70d40f117627538fc5865fd215e1998e73c64af | [
"MIT"
]
| permissive | https://github.com/Wanger-SJTU/leetcode-solutions | ade9486cef05ede6fa44cbbb5d726037518fac15 | eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4 | refs/heads/master | 2023-04-11T19:56:13.561234 | 2021-05-10T12:00:28 | 2021-05-10T12:00:28 | 129,606,869 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# @lc app=leetcode.cn id=47 lang=python3
#
# [47] 全排列 II
#
from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
def helper(nums, path, res):
if not nums:
res.append(path)
for i, num in enumerate(nums):
if i > 0 and num == nums[i-1]:
continue
helper(nums[:i]+nums[i+1:],path+[num], res)
nums.sort()
res = []
helper(nums, [], res)
return res
if __name__ == "__main__":
s = Solution()
res = s.permuteUnique([1,2,1])
print(res)
| UTF-8 | Python | false | false | 631 | py | 364 | 47.全排列-ii.py | 325 | 0.4912 | 0.4736 | 0 | 26 | 23.038462 | 64 |
tf-venegas10/difficultyPrediction | 10,282,151,740,676 | 730c5e437b6ec07c3fc9f35401c2deff06ea8dc5 | 34e1b76ba9a864bd8150af697cdc08e9348ad921 | /MLModel/FeatureHeuristics.py | 7b999f53e52d4c87cc8f3c5a40bb1c9a28806358 | []
| no_license | https://github.com/tf-venegas10/difficultyPrediction | 92e97d50bf8a5219d57c829cc66a4569241d8964 | e9d0bd44dce7bf107014e2a182327a7afa432bee | refs/heads/master | 2020-03-08T06:46:42.672529 | 2018-12-06T21:28:38 | 2018-12-06T21:28:38 | 127,979,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import copy
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from GetDataSet import getDataSubSet
from Validation import manual_cross_validation
feature_lower_bound = 0
feature_upper_bound = 100
feature_amount = 100
# initialize models
forest = RandomForestClassifier(n_estimators=100, max_depth=20, random_state=111)
gdBoost = GradientBoostingClassifier(random_state=111)
mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(10, 2), random_state=111)
models = [forest, gdBoost, mlp]
names = ["Random Forest", "Gradient Boosting", "MuliLayer Perceptrons"]
def tree_selection_heuristic():
# The feature set that is going to be evaluated
feature_set = [0]
max_accuracy = 0.0
best_set = []
best_model = None
# For every set of 1 feature the recursive search is applied
for i in xrange(1,feature_amount):
feature_set.pop()
feature_set.append(i)
new_x_norm,y, _, _ = getDataSubSet(feature_set)
#INIT FOR RECURSIVE CALL
model, calc_best_model, calc_accuracy = manual_cross_validation(new_x_norm, y, models, names,True)
#RECURSIVE CALL
calc_accuracy, calc_best_set, calc_best_model = recursive_tree_exploration(feature_set,
calc_accuracy, calc_best_model)
if calc_accuracy > max_accuracy:
max_accuracy = calc_accuracy
best_set = calc_best_set
best_model = calc_best_model
print "-----------------------------------"
print "---------BEST FEATURE SET----------"
print best_set
print "-----------------------------------"
print "-------------ACCURACY--------------"
print max_accuracy
return best_model
def recursive_tree_exploration( feature_set, past_accuracy, past_model):
last_added = feature_set[len(feature_set) - 1]
new_feature_set = copy.copy(feature_set)
#POR QUe AGREGAR 0 ??
## COUNT 1 APPEND
new_feature_set.append(0)
max_accuracy = past_accuracy
best_set = feature_set
best_model = past_model
for iter in xrange(last_added +1, feature_amount):
## COUNT 1 POP
new_feature_set.pop()
## COUNT 1 APPEND
new_feature_set.append(iter)
new_x_norm, y, _, _ = getDataSubSet(new_feature_set)
model, calc_best_model, calc_accuracy = manual_cross_validation(new_x_norm, y, models, names,True)
if calc_accuracy > max_accuracy:
max_accuracy = calc_accuracy
best_set = new_feature_set
best_model = calc_best_model
calc_accuracy, calc_best_set, calc_best_model = recursive_tree_exploration(new_feature_set,max_accuracy, calc_best_model)
if calc_accuracy > max_accuracy:
max_accuracy = calc_accuracy
best_set = calc_best_set
best_model = calc_best_model
print best_set
print "BEST SET ACCURACY: "+str(max_accuracy)
print "BEST SET MODEL: " + str(best_model)
return max_accuracy, best_set, best_model
## Count number of 'easy' labeled instances and total instances
# This is done to keep control of the correct distribution of the dataset and the parameters of the experiment.
tree_selection_heuristic()
| UTF-8 | Python | false | false | 3,388 | py | 154 | FeatureHeuristics.py | 114 | 0.634002 | 0.622786 | 0 | 96 | 34.291667 | 133 |
yougov/tortilla | 7,327,214,211,321 | 8cad212e1c6115313faefb97cc343e90b64a7525 | 9319c5649d6908953eeea2e7f63f9ba3a90878f3 | /tests/test_wrappers.py | 4ed6acd70dbe10adb6ba39bf5a6c9f5dc0311616 | [
"MIT"
]
| permissive | https://github.com/yougov/tortilla | e598420fa75ec27d3ed2681bf53776e0c7351334 | eccc8a268307e9bea98f12f958f48bb03ff115b7 | refs/heads/master | 2023-06-21T05:51:15.346324 | 2019-02-22T12:31:20 | 2019-02-22T12:31:20 | 178,088,051 | 0 | 1 | null | true | 2019-03-27T23:08:51 | 2019-03-27T23:08:50 | 2019-03-23T11:38:40 | 2019-02-22T12:31:25 | 150 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import pytest
from requests.exceptions import HTTPError
from tortilla.utils import Bunch, bunchify, run_from_ipython
def time_function(fn, *args, **kwargs):
t1 = time.time()
fn(*args, **kwargs)
t2 = time.time()
return t2 - t1
def test_json_response(api, endpoints):
assert api.user.get('jimmy') == endpoints['/user/jimmy']['body']
assert api.user.get('имя') == endpoints['/user/имя']['body']
assert api.has_self.get() == endpoints['/has_self']['body']
def test_non_json_response(api):
with pytest.raises(ValueError):
api.nojson.get()
assert api.nojson.get(silent=True) is None
def test_cached_response(api):
api.cache.get(cache_lifetime=100)
assert api.cache.get() == "the first response"
assert api.cache.get() == "the first response"
api.cache.get(cache_lifetime=0.25, ignore_cache=True)
assert api.cache.get() == "the second response"
assert api.cache.get() == "the second response"
def test_request_delay(api):
api.config.delay = 0.2
assert time_function(api.test.get) >= 0.2
assert time_function(api.test.get, delay=0.1) >= 0.1
assert time_function(api.test.get) >= 0.2
def test_request_methods(api):
assert api.put_endpoint.put().message == "Success!"
assert api.post_endpoint.post().message == "Success!"
assert api.patch_endpoint.patch().message == "Success!"
assert api.delete_endpoint.delete().message == "Success!"
assert api.head_endpoint.head() is None
def test_extensions(api):
assert api.extension.hello.get(extension='json').message == "Success!"
assert api.extension.hello.get(extension='.json').message == "Success!"
def test_wrap_config(api):
api.endpoint(debug=True, silent=True, extension='json', cache_lifetime=5)
assert api.endpoint.config.debug
assert api.endpoint.config.silent
assert api.endpoint.config.extension == 'json'
assert api.endpoint.config.cache_lifetime == 5
api.endpoint(debug=False, silent=False, extension='xml', cache_lifetime=8)
assert not api.endpoint.config.debug
assert not api.endpoint.config.silent
assert api.endpoint.config.extension == 'xml'
assert api.endpoint.config.cache_lifetime == 8
def test_wrap_chaining(api):
assert api.one.two.three is api('one').two('three')
assert api.one.two.three is api.one('two')('three')
assert api.one.two.three is api('one', 'two').three
assert api.one.two.three is api('one', 'two', 'three')
assert api.one(2) is api('one', 2)
assert api.one.two.three is not api('one/two/three')
def test_response_exceptions(api):
with pytest.raises(HTTPError):
api.status_404.get()
with pytest.raises(HTTPError):
api.status_500.get()
api.status_404.get(silent=True)
api.status_500.get(silent=True)
def test_bunchify():
bunch = bunchify([{'a': 1}, {'b': 2}])
assert isinstance(bunch[0], Bunch)
def test_run_from_ipython():
assert getattr(__builtins__, '__IPYTHON__', False) == run_from_ipython()
def test_config_endpoint(api, endpoints):
assert api.get('config') == endpoints['/config']['body']
assert api('config').get() == endpoints['/config']['body']
| UTF-8 | Python | false | false | 3,272 | py | 15 | test_wrappers.py | 10 | 0.670545 | 0.657685 | 0 | 108 | 29.240741 | 78 |
Arrow023/nuvi | 7,481,833,039,389 | 83536fd5f449829c3d9579a21378bab38835945c | d8ee292721ea9f6d0123c7da12830f98ac713d03 | /nuvi.py | 12bea40f07b39123163fcf904d7dc5c2cd7da1d9 | []
| no_license | https://github.com/Arrow023/nuvi | 8064c260a1c621b8f874c747c1cc726419727401 | 73fa3b68878c33a3709605125ddafddd860bb801 | refs/heads/master | 2021-03-29T01:09:35.250828 | 2020-03-17T08:01:07 | 2020-03-17T08:01:07 | 247,911,498 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import speech_recognition as sr
import pyttsx3
import time
from flask import Flask, render_template,send_file
import random
engine=pyttsx3.init()
voices=engine.getProperty('voices')
engine.setProperty('rate',170)
#print(voices[1].id)
engine.setProperty('voice',voices[1].id)
values={'name':'','age':'','temp':'','description':'','prescription':''}
portno=random.randint(1024,5000)
app=Flask(__name__)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def takeCommand():
r=sr.Recognizer()
while(True):
with sr.Microphone() as source:
print('Listening to source.....')
r.pause_threshold=0.5
audio=r.listen(source)
try:
print('Recognizing....')
query=r.recognize_google(audio,language='en-in')
print(f'User said:{query}\n')
return query
except Exception as e:
#print(e)
speak("sorry! I couldn't get you.")
def getData():
speak("Please..say..Patient's name")
query=takeCommand().lower()
values['name']=query
print("Patient's Name:",values['name'])
speak("What is the age?")
query=takeCommand()
values['age']=query
print("Patient's age: ",values['age'])
speak("What's the current body temperature?")
query=takeCommand().lower()
values['temp']=query
print("")
speak("What's the problem of the patient?")
query=takeCommand().lower()
values['description']=query
speak("What's the treatment required?")
query=takeCommand().lower()
values['prescription']=query
speak("Thank you! for providing the information.")
if __name__ == "__main__":
print("Starting Nuvi....")
time.sleep(1)
print("Building cache....")
time.sleep(1)
print("Request for access..")
time.sleep(1)
speak("Hello. My name is Nuvi. I'm your personal, medical assistant.")
getData()
#values={'name':'Rajesh','age':'20','temp':'90.5','description':'cough & cold','prescription':'just a regular checkup'}
f=open("formdata.txt","w")
for i in values.keys():
f.write(values[i]+"\n")
f.close()
speak("Your report is ready. You can visit 127.0.0.1:"+str(portno)+" for view")
@app.route('/')
def start():
return render_template('medical.html',pname=values['name'],page=values['age'],ptemp=values['temp'],pdes=values['description'],ppres=values['prescription'])
app.run(port=portno)
| UTF-8 | Python | false | false | 2,516 | py | 11 | nuvi.py | 7 | 0.604531 | 0.59221 | 0 | 74 | 32.391892 | 163 |
xiaonuoAndy/maya_lancher | 9,216,999,822,863 | 8aae3ebd4d605e7dbd4ff6ba2d2d89985950c777 | f1959aa9f1e51b7b0956ac7405df58c0044693bd | /callback/__init__.py | b1b897e0e9a5730d12bdd22a2ea610a897a3ab83 | []
| no_license | https://github.com/xiaonuoAndy/maya_lancher | fbdddc320804a4a5094503293a3d363a546c8abe | 1145f798d79163bcad29fe5f9c74e19b64b3d408 | refs/heads/master | 2022-02-04T12:58:33.297992 | 2019-07-27T05:47:35 | 2019-07-27T05:47:35 | 194,384,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
__author__ = 'yangzhuo'
import pymel.core as pm
from auto_dayu_menu import auto_dayu_menu
pm.scriptJob(event=('SceneOpened', auto_dayu_menu))
| UTF-8 | Python | false | false | 194 | py | 9 | __init__.py | 8 | 0.685567 | 0.680412 | 0 | 10 | 18.4 | 51 |
MWalega/LSTM_covid19 | 16,088,947,536,981 | 876dc6b55146d67f5f356cd3ea4d018eb1705ac0 | 1b28889033c17451d6a9fc15695ecd94f842b523 | /covid19Predictor.py | 2d70a8d7075c6a2662645a143b01fb6b0564be9e | []
| no_license | https://github.com/MWalega/LSTM_covid19 | 6f1376b561b3545cb4650706eb234133665b3ed8 | 195ac48e6269b112377fe977b8e3472813511533 | refs/heads/master | 2023-03-09T19:56:11.190201 | 2021-02-27T05:38:37 | 2021-02-27T05:38:37 | 342,026,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
from torch import nn
class covid19Predictor(nn.Module):
def __init__(self, n_features, n_hidden, seq_length, n_layers=2):
super(covid19Predictor, self).__init__()
self.n_hidden = n_hidden
self.seq_length = seq_length
self.n_layers = n_layers
self.lstm = nn.LSTM(
input_size=n_features,
hidden_size=n_hidden,
num_layers=n_layers,
dropout=0.5
)
self.linear = nn.Linear(in_features=n_hidden, out_features=1)
def reset_hidden_state(self):
self.hidden = (
torch.zeros(self.n_layers, self.seq_length, self.n_hidden),
torch.zeros(self.n_layers, self.seq_length, self.n_hidden)
)
def forward(self, sequences):
lstm_out, self.hidden = self.lstm(
sequences.view(len(sequences), self.seq_length, -1),
self.hidden
)
y_pred = self.linear(
lstm_out.view(self.seq_length, len(sequences), self.n_hidden)[-1]
)
return y_pred | UTF-8 | Python | false | false | 1,064 | py | 2 | covid19Predictor.py | 2 | 0.566729 | 0.557331 | 0 | 41 | 24.97561 | 77 |
edwardsrob222/7-1-1-django-ice-cream-shop | 19,104,014,541,213 | b75bc823b74824846223338f6a04854861259160 | 0f632bd0e83d20ca4afbe2c35ed921bc0dafd59f | /ice_cream/models.py | 60bc228fb1a56c59a549a29586af40b43c076556 | []
| no_license | https://github.com/edwardsrob222/7-1-1-django-ice-cream-shop | b87235dbd7408d235acf7a4261194fe30f3e1620 | 0af22191504b664fd8a93d98b6f73b53c91abec0 | refs/heads/master | 2023-05-03T06:14:58.931344 | 2019-12-05T19:37:07 | 2019-12-05T19:37:07 | 213,447,384 | 0 | 0 | null | false | 2023-04-21T20:39:07 | 2019-10-07T17:38:54 | 2019-12-05T19:37:11 | 2023-04-21T20:39:07 | 27 | 0 | 0 | 2 | Python | false | false | from django.db import models
# Create your models here.
from django.db import models
import datetime
class IceCream(models.Model):
DAILY = 'Daily'
WEEKLY = 'Weekly'
SEASONAL = 'Seasonal'
VANILLA = 'Vanilla'
CHOCOLATE = 'Chocolate'
BASE_CHOICES = [
(VANILLA, 'Vanilla'),
(CHOCOLATE, 'Chocolate'),
]
AVAILABLE_CHOICES = [
(DAILY, 'Daily'),
(WEEKLY, 'Weekly'),
(SEASONAL, 'Seasonal')
]
flavor = models.CharField(max_length=200)
base = models.CharField(max_length=200, choices=BASE_CHOICES)
available = models.CharField(max_length=200, choices=AVAILABLE_CHOICES)
featured = models.BooleanField(default=False)
date_churned = models.DateField('Date Churned', default=datetime.date.today)
likes = models.IntegerField(default=0)
def __str__(self):
return self.flavor
def get_absolute_url(self):
return reverse('ice_cream:index')
| UTF-8 | Python | false | false | 959 | py | 9 | models.py | 5 | 0.643379 | 0.632951 | 0 | 40 | 22.975 | 80 |
julieweeds/Compositionality | 16,956,530,902,032 | 4c926cee3e60e21e19e795f203cff028ffb9def1 | 63c756e81b9a248dea50c60a273c5c83121f7113 | /compositionality/comparison.py | 48945a5c5d948531bd57924f621c0239e79233f4 | []
| no_license | https://github.com/julieweeds/Compositionality | 01bec8d3dc4a4f769a90ddc4477873c884c2cdf3 | c6e97ec26e872f25259950704292979062fead42 | refs/heads/master | 2021-01-10T09:29:11.671915 | 2016-03-22T10:15:46 | 2016-03-22T10:15:46 | 43,299,991 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'juliewe'
#compare observed and composed vectors, correlate with compositionality judgements
import compounds,sys, ConfigParser,ast, nouncompounds, numpy as np, composition,math
from simEngine import SimEngine
def getValue(text):
# extract 0.25 from offsetting:0.25
fields=text.split(":")
return float(fields[1])
class Comparator():
key1="observed"
key2="composed"
offsetting=1.0 #offset the dependency vector before composition (False/0 for baseline)
def __init__(self,configfile):
self.config=ConfigParser.RawConfigParser()
self.config.read(configfile)
self.exp_type=self.config.get('default','exp_type') # set this to anything other than 'compounds' if not wanting to load the phrasal compounds and use other config options
try:
self.parentdir=self.config.get('default','parentdir')
except:
self.parentdir=""
self.filenames={}
self.filenames[Comparator.key1]=self.parentdir+self.config.get('default','observedfile')
try:
self.simmetric=self.config.get('default','simmetric')
except:
self.simmetric="cosine"
try:
self.vtype=self.config.get('default','vtype')
except:
self.vtype="apt"
if self.exp_type=="compounds":
self.setup_compounds_exp(configfile)
if 'observed' not in self.skip:
self.compounder.readcompounds()
self.loadFreqs(self.rels,outfile=self.testcompoundfile)
else:
self.compounder.generate(self.rels,outfile=self.testcompoundfile) #generate list of compounds from observed file
print len(self.compounder.generated_compounds),self.compounder.generated_compounds
#if self.crossvalidate:
# self.compounder.setup_folds(self.nfolds)
# do this later
if 'revectorise' not in self.skip:
print "Revectorising observed phrasal vectors"
self.revectorise_observed(configfile,self.compounder.generated_compounds)
def setup_compounds_exp(self,configfile):
try:
self.skip=ast.literal_eval(self.config.get('default','skip'))
except:
self.skip=[]
self.compounder=compounds.Compounder(configfile)
self.composer = nouncompounds.NounCompounder(["config",configfile])
self.rels=ast.literal_eval(self.config.get('default','rels'))
self.testcompoundfile=self.config.get('compounder','compound_file')
self.reducestring={}
if self.vtype=="byblo":
self.reducestring[Comparator.key1]=".nouns.reduce_1_1"
else:
self.reducestring[Comparator.key1]=".nouns.reduce_0_2"
self.normstring=".filtered"
if self.composer.normalised:
self.normstring+=".norm"
if self.composer.weighting in ['smooth_ppmi','ppmi','pnppmi','gof_ppmi']:
self.weightingstring="."+self.composer.weighting
if self.composer.ppmithreshold>0:
self.weightingstring+="_"+str(self.composer.ppmithreshold)
else: self.weightstring=""
#self.weightingstring=""
self.freqfile=self.filenames[Comparator.key1]+self.reducestring[Comparator.key1]+".rtot"
for type in self.filenames.keys():
self.filenames[type]=self.filenames[type]+self.reducestring.get(type,"")+self.normstring+self.weightingstring
try:
self.offsetting=float(self.config.get('default','offsetting'))
except:
self.offsetting=Comparator.offsetting
try:
self.nfolds=int(self.config.get('default','nfolds'))
trialp=ast.literal_eval(self.config.get('default','trialp'))
self.crossvalidate=True
self.paramdict={}
try:
self.cv_param=self.config.get('default','cv_param')
except:
self.cv_param="offsetting"
self.paramdict[self.cv_param]=trialp
try:
self.repetitions=int(self.config.get('default','repetitions'))
except:
self.repetitions=1
except:
self.nfolds=0
self.paramdict={}
self.crossvalidate=False
self.paramdict["offsetting"]=[self.offsetting]
if self.crossvalidate:
print "Cross-validation: number of folds = "+str(self.nfolds)
print "Number of repetitions = "+str(self.repetitions)
print self.paramdict
print "Default off-setting: ",self.offsetting
else:
print "No cross-validation"
print self.paramdict
def revectorise_observed(self,configfile,phraselist):
vectoriser=composition.Composition(["config",configfile])
vectoriser.options=['revectorise']
vectoriser.run(phraselist)
def generate_SimEngine(self):
if self.composer.untyped:
SimEngine.minorder=0
SimEngine.maxorder=0
if self.exp_type==('compounds'):
simEngine=SimEngine(self.filenames,self.isListedCompound,pathdelim=self.composer.pathdelims[0],saliency=self.composer.saliency,saliencyperpath=self.composer.saliencyperpath)
elif self.exp_type==('simple_compounds'):
simEngine=SimEngine(self.filenames,self.isCompound,pathdelim=self.composer.pathdelims[0],saliency=self.composer.saliency,saliencyperpath=self.composer.saliencyperpath)
return simEngine
def isCompound(self,token):
return len(token.split('|'))==3
def isListedCompound(self,token):
return len(token.split('|'))==3 and token in self.compounder.generated_compounds
def isConstituent(self,token):
lex =token.split('/')[0]
return lex in self.composer.getLeftIndex() or lex in self.composer.getRightIndex()
def loadFreqs(self,rel_list,outfile): #should be part of compounder
print("Loading "+self.freqfile+" for frequency analysis")
with open(outfile,"w") as outstream:
self.compounder.generated_compounds=[]
with open(self.freqfile) as instream:
for line in instream:
line=line.rstrip()
fields=line.split('\t')
parts=fields[0].split('|')
if len(parts)==3 and parts[1] in rel_list:
posparts=parts[2].split('/')
if len(posparts)==2:
if self.compounder.addFreq(fields[0],float(fields[1])):
self.compounder.generated_compounds.append(fields[0])
outstream.write(fields[0]+"\n")
def calcInternalSims(self):
filenames={Comparator.key1:self.filenames[Comparator.key1]}
print "Starting calculation of constituent similarities"
aSimEngine=SimEngine(filenames,include_function=self.isConstituent)
with open("intsims","w") as outstream:
aSimEngine.allpairs(outstream=outstream)
with open("intsims","r") as instream:
for line in instream:
line=line.rstrip()
fields=line.split('\t')
self.compounder.addIntSim(fields[1],fields[2],float(fields[3]))
def correlate(self,instream,parampair=('','')):
for line in instream:
line=line.rstrip()
fields=line.split('\t')
if fields[1]==Comparator.key1 and fields[2]== Comparator.key2:
self.compounder.addAutoSim(fields[0],fields[3])
self.compounder.correlate(show_graph=(not self.crossvalidate))
if self.crossvalidate:
reps=self.repetitions
m=[]
while reps>0:
reps=reps-1
m+=self.compounder.crossvalidate(self.nfolds,p=str(parampair[0])+":"+str(parampair[1]),rep=reps)
return m
else:
return []
def analyse(self,cv_matrix):
#print cv_matrix
testrs=[]
testps=[]
#analyse training performance
#for each fold find best parameter
#for that fold and parameter collect test performance
folds = self.nfolds*self.repetitions
for i in range(0,folds):
besttraining=0 #make 1 for worst, 0 for best
bestindex=-1
for index,line in enumerate(cv_matrix):
if line[1]==i:
if line[2]>besttraining: #make < for worst, > for best
besttraining=line[2]
bestindex=index
testrs.append(cv_matrix[bestindex][3])
testps.append(getValue(cv_matrix[bestindex][0]))
perf=np.mean(testrs)
error=np.std(testrs)/math.sqrt(folds)
print "Cross-validated performance over %s repetitions is %s with error %s"%(str(len(testrs)),str(perf),str(error))
mp=np.mean(testps)
msd=np.std(testps)
print "Mean Chosen parameter settings: ",str(mp),str(msd)
def run(self):
if self.exp_type=='compounds':
cv_matrix=[]
for key in self.paramdict.keys():
for value in self.paramdict[key]:
if 'compose' not in self.skip:
print "Running composer"
self.composer.run(parampair=(key,value)) #run composer to create composed vectors
self.composer.close()
else:
self.composer.outfile=self.composer.getComposedFilename(parampair=(key,value))
simfile=self.composer.outfile+".sims"
if 'sim' not in self.skip:
print "Running sim engine"
print "Reloading observed phrasal vectors"
self.mySimEngine=self.generate_SimEngine() #will load observed vectors
self.mySimEngine.addfile(Comparator.key2,self.composer.outfile) #add composed vector file to SimEngine
with open(simfile,"w") as outstream:
self.mySimEngine.pointwise(outstream,simmetric=self.simmetric)
#self.calcInternalSims()
if 'correlate' not in self.skip:
print "Running correlation"
with open(simfile,'r') as instream:
m=self.correlate(instream,parampair=(key,value))
if len(m)>0:
for line in m:
cv_matrix.append(line)
if len(cv_matrix)>0:
self.analyse(cv_matrix)
else:
print "Reloading observed phrasal vectors"
self.mySimEngine=self.generate_SimEngine() #will load observed vectors
self.mySimEngine.allpairs()
if __name__=="__main__":
myComparator=Comparator(sys.argv[1])
myComparator.run() | UTF-8 | Python | false | false | 11,015 | py | 15 | comparison.py | 7 | 0.592737 | 0.586473 | 0 | 279 | 38.483871 | 185 |
pjalagna/Aryc2019 | 19,370,302,543,251 | 76f7eb363608a8526cb5f9316c245160d53cd5f8 | 6e0001fb880d83d1d3e305e42acba93b85631838 | /Device/EdeviceDev/eDevice/eDeviceDemo/FQA/FQA.py | f9d283c670eee4e03ddefcb588a9ce051183df1f | []
| no_license | https://github.com/pjalagna/Aryc2019 | 97b6de8248006bf19af527e4c1e35763b1277aaa | ece8ccf18305e6c65a59bee650b47b8730904bd0 | refs/heads/master | 2021-06-21T23:14:55.527195 | 2021-03-18T08:02:31 | 2021-03-18T08:02:31 | 205,817,944 | 0 | 1 | null | false | 2020-10-16T00:13:31 | 2019-09-02T09:03:22 | 2020-09-16T07:21:33 | 2020-09-16T07:21:24 | 138,445 | 0 | 0 | 1 | Python | false | false | def FQagent(loc,serviceBox):
"""
pja 11-9-13 tested
pja - 11-5-13
File Queue Agent off of loc \n
serviceBox is a pointer to the service.main
"""
import os
import time
sleepLong = 10 # test = 10 real = 500
sleepShort = 2 #
c = 1
while (c==1):
# get the list of files from iamin to ldir
ldir = os.listdir(loc)
# if len=0
if(len(ldir) == 0): #-1
## sleep long
print("sleepLong")
time.sleep(sleepLong)
else:
#per rec == note single thread loop (spawn for bees)
for f in range(len(ldir)):
# open file
fh = open(loc + ldir[f],'r')
# readit
li = fh.read() # all into string
# close it
fh.close()
serviceBox(li)
# delete file
os.remove(loc + ldir[f])
#endfor
print('sleepShort')
time.sleep(sleepShort)
#endif - 1
#wend
#end FQagent
| UTF-8 | Python | false | false | 1,090 | py | 428 | FQA.py | 201 | 0.453211 | 0.431193 | 0 | 38 | 27.684211 | 64 |
jufei/BtsShell | 19,078,244,763,504 | aa75b38c8731ad807ff05faddb2c888de14b9c45 | a3375aeabc2782d92dc2c3208e5badb00daa3703 | /BtsShell/application_lib/poweron.py | 43eb97a6d58c223cf206d5ee77c76d05eba44525 | []
| no_license | https://github.com/jufei/BtsShell | f256ff573cbbb7a834ae608eb991eb337503f159 | 75487a40ac2cc5f24f70d011ad6cd3924908f783 | refs/heads/master | 2021-01-10T09:25:02.656231 | 2016-03-29T05:42:04 | 2016-03-29T05:42:04 | 54,948,974 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import serial
import os
import sys
import types
import struct
import socket
import logging
import binascii
import string
import random
import xlrd
DEBUG_LEVEL_NONE = 0
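# The power breaker command strings below appear to be Modbus RTU frames:
# slave address (01), function code (05 = write single coil, 02 = read
# discrete inputs), coil address, value (FF00 = on, 0000 = off) and a
# trailing CRC-16. This reading of the frame layout is an inference from
# the byte patterns, not documented device behaviour.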
POWER_ON_PORT_1 = "01050000FF008C3A"
POWER_OFF_PORT_1 = "010500000000CDCA"
CHECK_PORT_1 = "010200000001B9CA"
POWER_ON_PORT_2 = "01050001FF00DDFA"
POWER_OFF_PORT_2 = "0105000100009C0A"
CHECK_PORT_2 = "010200010001E80A"
POWER_ON_PORT_3 = "01050002FF002DFA"
POWER_OFF_PORT_3 = "0105000200006C0A"
CHECK_PORT_3 = "010200020001180A"
POWER_ON_PORT_4 = "01050003FF007C3A"
POWER_OFF_PORT_4 = "0105000300003DCA"
CHECK_PORT_4 = "01020003000149CA"
POWER_ON_PORT_5 = "01050004FF00CDFB"
POWER_OFF_PORT_5 = "0105000400008C0B"
CHECK_PORT_5 = "010200040001F80B"
POWER_ON_PORT_6 = "01050005FF009C3B"
POWER_OFF_PORT_6 = "010500050000DDCB"
CHECK_PORT_6 = "010200050001A9CB"
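# The TM500 power commands below look like Omron Host Link style frames
# ('@' + unit number + 'WR' + data + FCS + '*' + CR) writing a 2-bit
# AC/DC output mask (0 = all off, 1 = AC on, 2 = DC on, 3 = both on).
# This interpretation is an assumption based on the byte patterns.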
DC_ON = (0x40,0x30,0x30,0x57,0x52,0x30,0x30,0x31,0x30,0x30,0x30,0x30,0x32,0x34,0x36,0x2a,0x0d)
AC_ON = (0x40,0x30,0x30,0x57,0x52,0x30,0x30,0x31,0x30,0x30,0x30,0x30,0x31,0x34,0x35,0x2A,0x0D)
POWER_OFF = (0x40,0x30,0x30,0x57,0x52,0x30,0x30,0x31,0x30,0x30,0x30,0x30,0x30,0x34,0x34,0x2a,0x0d)
POWER_ON = (0x40,0x30,0x30,0x57,0x52,0x30,0x30,0x31,0x30,0x30,0x30,0x30,0x33,0x34,0x37,0x2a,0x0d)
class CSocketClient:
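    """Simple blocking TCP/UDP socket client (used here to drive the power breaker)."""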
def __init__(self, ServerIp = '127.0.0.1', ServerPort = 15004, ClientIp = "127.0.0.1", ClientPort = 12005, ConnectType = 'TCP', TimeOut = 5.0):
self.DebugLevel = DEBUG_LEVEL_NONE
self.IfConneted = False
self.TimeOut = TimeOut
self.ServerIp = ServerIp
self.ServerPort = ServerPort
self.ClientIp = ClientIp
self.ClientPort = ClientPort
self.ConnectType = ConnectType
self.Socket = None
self.__Log = CLogPrinter().CreatLogger('SocketConnection.CSocketClient')
def open(self):
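        """Open the configured TCP or UDP socket, closing any previous connection."""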
# close previous connection
        if self.IfConneted:
self.close()
try:
if self.ConnectType == 'TCP':
self.Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif self.ConnectType == 'UDP':
self.Socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
self.__Log.error("Socket type '%s' is invalid!" % self.ConnectType)
self.Socket = None
self.IfConneted = False
return False
self.Socket.settimeout(self.TimeOut)
self.Socket.bind((self.ClientIp, self.ClientPort))
self.Socket.connect((self.ServerIp, self.ServerPort))
self.IfConneted = True
self.__Log.debug("'%s' socket '%s:%s' is established!" % (self.ConnectType, self.ServerIp, self.ServerPort))
return True
except Exception, p_ErrInfo:
self.__Log.error(p_ErrInfo)
return False
def close(self):
        if self.IfConneted:
self.Socket.close()
self.IfConneted = False
self.__Log.debug("'%s' socket '%s:%s' is closed!" % (self.ConnectType, self.ServerIp, self.ServerPort))
return True
def send(self, SendMsg = ''):
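        """Send SendMsg over the open socket; returns True on success."""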
p_marshalled = ""
        if not self.IfConneted:
self.__Log.error("'%s' socket '%s:%s' is closed, send failure!" %\
(self.ConnectType, self.ServerIp, self.ServerPort))
return False
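        # Marshal the outgoing message: string items are sent as-is, any
        # other items are packed as 32-bit integers in network byte order.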
for p in SendMsg:
            if not isinstance(p, types.StringTypes):
p_marshalled += struct.pack("l", socket.htonl(p))
else:
p_marshalled += p
try:
self.Socket.send(p_marshalled)
self.__Log.debug("Send message over '%s' socket '%s:%s' is success!" %\
(self.ConnectType, self.ServerIp, self.ServerPort))
        except Exception, p_Err:
            self.__Log.error(p_Err)
            self.__Log.error("Send message over '%s' socket '%s:%s' is failed!" %\
                             (self.ConnectType, self.ServerIp, self.ServerPort))
            return False
        return True
def receive(self, maxMsgSize=4096, recTimeout=10.0):
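        """Receive up to maxMsgSize bytes; returns None on timeout or error."""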
marshalled = []
msg = ""
if self.TimeOut != recTimeout:
self.TimeOut = recTimeout
try:
if self.TimeOut == 0xFFFFFFFF:
# infinite timeout
self.TimeOut = None
elif self.TimeOut > 0x7FFFFFFF:
# to avoid errors
self.TimeOut = 0x7FFFFFFF
self.Socket.settimeout(self.TimeOut)
(msg, address) = self.Socket.recvfrom(maxMsgSize)
except Exception, p_Error:
self.__Log.error(p_Error)
self.__Log.error("No response received during the timeout '%s'." % recTimeout)
msg = None
return msg
class CLogPrinter:
def __init__(self):
"""Initial logger configuration variable"""
self.__Logger = None
self.__LogTag = 'CheckLog'
def InitConfigLogger(self, LogLevel = 1, LogFileName = 'SocketConnection.log', LogFileMode = 'w', LogOutput = 2):
"""Creat and cofig logger
Log Level:0-DEBUG; 1-INFO; 2-WARN; 3-ERROR; 4-FATAL
LogOutput: 0-Only stdout; 1-Only file; 2-Both stdout and file
"""
#Check input argv
DirName = os.path.dirname(LogFileName)
if not os.path.isdir(DirName):
print "FATAL: Log directrory %s is not exist, please check your configuration" % DirName
sys.exit(1)
        #Log level conversion
if LogLevel == 0:
self.__LogLevel = logging.DEBUG
elif LogLevel == 1:
self.__LogLevel = logging.INFO
elif LogLevel == 2:
self.__LogLevel = logging.WARN
elif LogLevel == 3:
self.__LogLevel = logging.ERROR
elif LogLevel == 4:
self.__LogLevel = logging.FATAL
else:
            #the default log level is info
self.__LogLevel = logging.INFO
        #Log output method conversion
if LogOutput == 0:
FileHandlerSwitch = 0
StdHandlerSwitch = 1
elif LogOutput == 1:
FileHandlerSwitch = 1
StdHandlerSwitch = 0
else:
FileHandlerSwitch = 1
StdHandlerSwitch = 1
#Basic config for file handler
if FileHandlerSwitch:
logging.basicConfig(filename = LogFileName,
filemode = LogFileMode,
format = '<%(asctime)s> %(module)s/%(name)s/line %(lineno)d, %(levelname)s: %(message)s',
level = self.__LogLevel)
        #create logger
        self.__Logger = logging.getLogger(self.__LogTag)
        #create handler
self.__Handler = logging.StreamHandler(sys.stdout)
#set level
self.__Handler.setLevel(self.__LogLevel)
#set format
p_Formatter = logging.Formatter('%(module)s/%(name)s/%(levelname)s: %(message)s')
self.__Handler.setFormatter(p_Formatter)
##set filter
#filter=logging.Filter('tester')
#self.handler.addFilter(filter)
#load handler to logger
if StdHandlerSwitch:
self.__Logger.addHandler(self.__Handler)
return True
def CreatLogger(self, LogTag = 'A2A'):
"""Creat a logger
Input:[1]LogTag, seem like 'A2ALog.main', 'A2ALog.common', 'A2ALog.common.find'
Output:[1]logger for print
"""
self.__LogTag = LogTag
self.__Logger = logging.getLogger(self.__LogTag)
return self.__Logger
def DisableDebugLog(self):
"""Set log level
Log Level:0-DEBUG; 1-INFO; 2-WARN; 3-ERROR; 4-FATAL
"""
#self.__Handler.setLevel(self.__LogLevel)
logging.disable(logging.DEBUG)
return True
def CloseLogger(self):
"""Close logger"""
self.__Handler.flush()
self.__Logger.removeHandler(self.__Handler)
return True
def ConfigLogger(LogFile):
"""Config logger for application"""
p_LogFileTmp = LogFile
p_LogPrtTmp = CLogPrinter()
p_LogPrtTmp.InitConfigLogger(0, p_LogFileTmp, 'a')
p_LogTmp = p_LogPrtTmp.CreatLogger('SocketConnection.Main')
return (p_LogPrtTmp, p_LogTmp)
def ReadMsg(MsgExcelPath):
""""""
p_MsgExcelPath = MsgExcelPath
p_Logging = CLogPrinter().CreatLogger('SocketConnection.ReadMsg')
p_MsgBody = ""
if not os.path.isfile(p_MsgExcelPath):
p_Logging.error("Excel %s does not exists!" % p_MsgExcelPath)
return p_MsgBody
try:
p_MsgExcel = xlrd.open_workbook(p_MsgExcelPath)
if 'Message' in p_MsgExcel.sheet_names():
p_MsgSheet = p_MsgExcel.sheet_by_name('Message')
p_ColList = p_MsgSheet.row_values(0)
if ("Value") in p_ColList and (p_MsgSheet.nrows > 1):
p_ElementValueCol = p_ColList.index("Value")
for p_ElementValue in p_MsgSheet.col_values(p_ElementValueCol)[1:]:
p_ElementValue = str(p_ElementValue).upper().replace('0X', '')
p_MsgBody = string.join((p_MsgBody, p_ElementValue), '')
else:
p_Logging.error("Message body is empty!")
else:
p_Logging.error("There is no message in excel!")
return p_MsgBody
except Exception, p_Error:
p_Logging.error(p_Error)
return ""
def ReadConnectInfo(MsgExcelPath):
""""""
p_MsgExcelPath = MsgExcelPath
p_Logging = CLogPrinter().CreatLogger('SocketConnection.ReadConnectInfo')
Src_IP = None
Des_IP = None
Src_Port = None
Des_Port = None
Protocol_Type = None
p_ConnectInfo = {}
if not os.path.isfile(p_MsgExcelPath):
p_Logging.error("Excel %s does not exists!" % p_MsgExcelPath)
return (Src_IP, Src_Port, Des_IP, Des_Port, Protocol_Type)
try:
p_MsgExcel = xlrd.open_workbook(p_MsgExcelPath)
if 'IP' in p_MsgExcel.sheet_names():
p_IpSheet = p_MsgExcel.sheet_by_name('IP')
p_ColList = p_IpSheet.row_values(0)
p_ValueList = p_IpSheet.row_values(1)
for p_No in xrange(len(p_ColList)):
p_ConnectInfo[p_IpSheet.cell_value(0, p_No)] = p_IpSheet.cell_value(1, p_No)
            if set([u'Src_IP', u'Des_IP', u'Src_Port', u'Des_Port', u'Protocol_Type']) == set(p_ConnectInfo.keys()):
Src_IP = p_ConnectInfo[u'Src_IP']
Des_IP = p_ConnectInfo[u'Des_IP']
Src_Port = p_ConnectInfo[u'Src_Port']
Des_Port = p_ConnectInfo[u'Des_Port']
Protocol_Type = p_ConnectInfo[u'Protocol_Type']
else:
p_Logging.error("Connection informaiton is empty!")
else:
p_Logging.error("There is no IP information in excel!")
return (Src_IP, Src_Port, Des_IP, Des_Port, Protocol_Type)
except Exception, p_Error:
p_Logging.error(p_Error)
return (Src_IP, Src_Port, Des_IP, Des_Port, Protocol_Type)
def tm500_power_on(port='COM1'):
"""This keyword power on TM500.
| Input Parameters | Man. | Description |
| port | Yes | com port |
Example
| Tm500 Power On | COM1 |
"""
try:
ser=serial.Serial(port,baudrate=9600,bytesize=7,parity='E',xonxoff=1,stopbits=2,timeout=0)
except:
raise Exception,"Open Serial failed port='%s' " %port
POWER_ON_CMD= '%c'* len(POWER_ON) % POWER_ON
try:
ser.write(POWER_ON_CMD)
print "TM500 Power ON"
finally:
ser.close()
def tm500_power_off(port='COM1'):
"""This keyword power off TM500.
| Input Parameters | Man. | Description |
| port | Yes | com port |
Example
| Tm500 Power Off | COM1 |
"""
try:
ser=serial.Serial(port,baudrate=9600,bytesize=7,parity='E',xonxoff=1,stopbits=2,timeout=0)
except:
raise Exception,"Open Serial failed port='%s' " %port
AC_OFF_CMD = '%c'* len(DC_ON) % DC_ON
try:
ser.write(AC_OFF_CMD)
print "TM500 Power OFF"
finally:
ser.close()
def power_on(vendor_info='COM1'):
"""This keyword power on BTS.
| Input Parameters | Man. | Description |
| vendor_info | Yes | PB COM port or IP |
Example
| Power On ||
| Power On | ${POWER_BREAK_IP}:${POWER_BREAK_OUTPUT}:${BTS_CONTROL_PC_LAB} |
"""
tmp = vendor_info.split(':')
if 3 == len(tmp):
PowerBreaker_Ip = tmp[0]
PowerBreaker_Port = tmp[1]
BtsControlPC_ip = tmp[2]
Log_Path = os.path.join(os.getcwd(), "SocketConnection.log")
(p_LogPrtTmp, p_LogTmp) = ConfigLogger(Log_Path)
p_Logging = CLogPrinter().CreatLogger('SocketConnection.Main')
p_Socket = None
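        # Map the requested breaker output (1-6) to its Modbus on/check frames.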
if '1' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_1
POWER_ON_PORT = POWER_ON_PORT_1
elif '2' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_2
POWER_ON_PORT = POWER_ON_PORT_2
elif '3' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_3
POWER_ON_PORT = POWER_ON_PORT_3
elif '4' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_4
POWER_ON_PORT = POWER_ON_PORT_4
elif '5' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_5
POWER_ON_PORT = POWER_ON_PORT_5
elif '6' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_6
POWER_ON_PORT = POWER_ON_PORT_6
else:
raise Exception, "PowerBreaker_Port out of range!"
try:
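            # The breaker is addressed on TCP port 4001; a random ephemeral
            # port is used for the local (client) side of the connection.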
p_Socket = CSocketClient(PowerBreaker_Ip, 4001, BtsControlPC_ip, random.randint(1025, 2047) , "TCP", 5)
if p_Socket.open():
# power on
if p_Socket.send(binascii.a2b_hex(POWER_ON_PORT)):
p_Logging.debug("Send message POWER_ON_PORT_%s %s over socke success!" \
% (PowerBreaker_Port, POWER_ON_PORT))
else:
raise Exception, "Send message POWER_ON_PORT_%s failed!" % PowerBreaker_Port
p_Logging.debug("Receive POWER ON PORT %s message--> %s" \
% (PowerBreaker_Port ,binascii.b2a_hex(p_Socket.receive())))
#check port
if p_Socket.send(binascii.a2b_hex(CHECK_PORT)):
p_Logging.debug("Send message CHECK_PORT_%s %s over socke success!" \
% (PowerBreaker_Port, CHECK_PORT))
else:
raise Exception, "Send message CHECK_PORT_%s failed!" % PowerBreaker_Port
p_Logging.debug("Receive CHECK PORT %s closed message--> %s" \
% (PowerBreaker_Port, binascii.b2a_hex(p_Socket.receive())))
else:
raise Exception, "Socket can't be established, send message failure!"
except Exception, p_Err:
p_Logging.error(p_Err)
finally:
if p_Socket:
p_Logging.info("Socket release!")
p_Socket.close()
else:
port = vendor_info
try:
ser=serial.Serial(port,baudrate=9600,bytesize=7,parity='E',xonxoff=1,stopbits=2,timeout=0)
except:
raise Exception,"Open Serial failed port='%s' " %port
POWER_ON_CMD = '%c'* len(POWER_ON) % POWER_ON
try:
ser.write(POWER_ON_CMD)
print "BTS Power ON"
finally:
ser.close()
def power_off(vendor_info='COM1'):
"""This keyword power off BTS.
| Input Parameters | Man. | Description |
| vendor_info | Yes | PB COM port or IP |
Example
| Power Off ||
| Power Off | ${POWER_BREAK_IP}:${POWER_BREAK_OUTPUT}:${BTS_CONTROL_PC_LAB} |
"""
tmp = vendor_info.split(':')
if 3 == len(tmp):
PowerBreaker_Ip = tmp[0]
PowerBreaker_Port = tmp[1]
BtsControlPC_ip = tmp[2]
Log_Path = os.path.join(os.getcwd(), "SocketConnection.log")
try:
if os.path.isfile(Log_Path):
os.remove(Log_Path)
if not os.path.isdir(os.path.dirname(Log_Path)):
DirCreate(os.path.dirname(Log_Path))
except:
print "Couldn't remove old log file but continues..."
(p_LogPrtTmp, p_LogTmp) = ConfigLogger(Log_Path)
p_Logging = CLogPrinter().CreatLogger('SocketConnection.Main')
p_Socket = None
if '1' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_1
POWER_OFF_PORT = POWER_OFF_PORT_1
elif '2' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_2
POWER_OFF_PORT = POWER_OFF_PORT_2
elif '3' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_3
POWER_OFF_PORT = POWER_OFF_PORT_3
elif '4' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_4
POWER_OFF_PORT = POWER_OFF_PORT_4
elif '5' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_5
POWER_OFF_PORT = POWER_OFF_PORT_5
elif '6' == PowerBreaker_Port:
CHECK_PORT = CHECK_PORT_6
POWER_OFF_PORT = POWER_OFF_PORT_6
else:
raise Exception, "PowerBreaker_Port out of range!"
try:
p_Socket = CSocketClient(PowerBreaker_Ip, 4001, BtsControlPC_ip, random.randint(1025, 2047) , "TCP", 5)
if p_Socket.open():
# power off
if p_Socket.send(binascii.a2b_hex(POWER_OFF_PORT)):
p_Logging.debug("Send message POWER_OFF_PORT_%s %s over socke success!" \
% (PowerBreaker_Port, POWER_OFF_PORT))
else:
raise Exception, "Send message POWER_OFF_PORT_%s failed!" % PowerBreaker_Port
p_Logging.debug("Receive POWER OFF PORT %s message--> %s" \
% (PowerBreaker_Port ,binascii.b2a_hex(p_Socket.receive())))
#check port
if p_Socket.send(binascii.a2b_hex(CHECK_PORT)):
p_Logging.debug("Send message CHECK_PORT_%s %s over socke success!" \
% (PowerBreaker_Port, CHECK_PORT))
else:
raise Exception, "Send message CHECK_PORT_%s failed!" % PowerBreaker_Port
p_Logging.debug("Receive CHECK PORT %s breaken message--> %s" \
% (PowerBreaker_Port, binascii.b2a_hex(p_Socket.receive())))
else:
raise Exception, "Socket can't be established, send message failure!"
except Exception, p_Err:
p_Logging.error(p_Err)
finally:
if p_Socket:
p_Logging.info("Socket release!")
p_Socket.close()
else:
port = vendor_info
try:
ser=serial.Serial(port,baudrate=9600,bytesize=7,parity='E',xonxoff=1,stopbits=2,timeout=0)
except:
raise Exception,"Open Serial failed port='%s' " %port
            # NOTE: built from the AC_ON constant despite the DC_OFF_CMD name;
            # the command constants are defined earlier in this file.
            DC_OFF_CMD = '%c'* len(AC_ON) % AC_ON
try:
ser.write(DC_OFF_CMD)
print "BTS Power OFF"
finally:
ser.close()
if __name__ == "__main__":
    # Run the default power-on only when this script is executed directly;
    # the original called power_on() at import time, which is unsafe.
    power_on()
    #power_on('10.68.160.131:10.140.86.97:6')
| UTF-8 | Python | false | false | 20,000 | py | 382 | poweron.py | 169 | 0.5472 | 0.513 | 0 | 568 | 33.211268 | 147 |
the-carpnter/codewars-level-7-kata | 14,894,946,630,706 | 955e246d813bfe3046a300135a6e3c90d51ddc01 | 03e042e457d1d3475e6883c152ac80a8ef30942c | /find_the_slope.py | fb3ea93885f668be1d94e62e2f54e0efb3d611c3 | []
| no_license | https://github.com/the-carpnter/codewars-level-7-kata | ea36a1a2a42c56abc1fb034c16d2caf94d363060 | 4d8279334040d79604d8e8e97601af0077093c6c | refs/heads/main | 2023-03-10T08:39:36.643182 | 2021-02-20T14:48:27 | 2021-02-20T14:48:27 | 313,869,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def getSlope(p1, p2):
x1, y1 = p1
x2, y2 = p2
return (y2 - y1) / (x2 - x1) if x2 - x1 else None
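# Hedged usage sketch (points are made up; quick sanity checks, not part of
# the kata solution):
if __name__ == '__main__':
    assert getSlope((0, 0), (2, 4)) == 2.0   # rise over run = 4 / 2
    assert getSlope((1, 5), (1, 9)) is None  # vertical line: slope undefined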
| UTF-8 | Python | false | false | 108 | py | 327 | find_the_slope.py | 326 | 0.509259 | 0.37963 | 0 | 4 | 26 | 53 |
Kai-Qian/webapps | 18,966,575,597,559 | 88ebd0e5bf10ac97e970efa56522ba8594d9b28a | 0dc230b31ab4dd87875eec6ec082d5d49fba15d2 | /hairReserve/s3.py | 685483affb34db49822c98e1779a8871a69d5ea5 | [
"MIT"
]
| permissive | https://github.com/Kai-Qian/webapps | f660b3aacfc856fe4dd7582486d242b032af1e64 | 079bd25541644f7d9250b1b942010d0950fe725f | refs/heads/master | 2016-09-14T00:13:43.505707 | 2016-04-20T07:46:56 | 2016-04-20T07:46:56 | 56,666,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ConfigParser
import boto
import io
import os
from boto.s3.key import Key
config = ConfigParser.ConfigParser()
config.read("config.ini")
AWS_ACCESS_KEY = config.get('S3', 'AccessKey')
AWS_SECRET_ACCESS_KEY = config.get('S3', 'SecretKey')
S3_BUCKET = config.get('S3', 'Bucket')
S3_ITEM_PREFIX = config.get('S3', 'AndrewID')
# AWS_ACCESS_KEY = os.environ.get('AccessKey')
# AWS_SECRET_ACCESS_KEY = os.environ.get('SecretKey')
# S3_BUCKET = os.environ.get('Bucket')
# S3_ITEM_PREFIX = os.environ.get('AndrewID')
def s3_upload(uploaded_file, id):
s3conn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
    # NOTE: debug print of AWS_SECRET_ACCESS_KEY removed; leaking the secret
    # key to stdout/logs is a security risk.
bucket = s3conn.get_bucket(S3_BUCKET)
k = Key(bucket)
k.key = S3_ITEM_PREFIX + '-' + str(id)
k.content_type = uploaded_file.content_type
if hasattr(uploaded_file, 'temporary_file_path'):
k.set_contents_from_filename(uploaded_file.temporary_file_path())
else:
k.set_contents_from_string(uploaded_file.read())
k.set_canned_acl('public-read')
return k.generate_url(expires_in=0, query_auth=False)
def s3_delete(id):
s3conn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
bucket = s3conn.get_bucket(S3_BUCKET)
k = Key(bucket)
k.key = S3_ITEM_PREFIX + '-' + str(id)
k.delete()
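# Hedged usage sketch (Django-style names are illustrative, not part of this
# module):
#
#   url = s3_upload(request.FILES['photo'], item.id)   # returns the public URL
#   s3_delete(item.id)                                 # removes the uploaded object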
| UTF-8 | Python | false | false | 1,309 | py | 42 | s3.py | 19 | 0.679908 | 0.663102 | 0 | 45 | 28.088889 | 73 |
EZanghi/Design_Patterns | 12,979,391,183,808 | 254df5a14b423e5dd95e85e644c48dc76a14f3e2 | 7b07bd4c094536023b30c662a154012926cac5dc | /Decorator/calculador_de_impostos.py | 2a85fc80d844f29ec853088879d8ec14fed1ca78 | []
| no_license | https://github.com/EZanghi/Design_Patterns | aad4c035cc8d86eb650d5b15f6ffb56b9c4af4d6 | 6b582f98d510ea160834ff21a5d067f93085b382 | refs/heads/master | 2022-12-09T20:10:47.121958 | 2020-08-27T23:45:52 | 2020-08-27T23:45:52 | 284,824,370 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from impostos import ISS, ICMS, PIS, COFINS
class Calculador_de_Impostos(object):
def realiza_calculo(self, orcamento, imposto):
imposto_calculado = imposto.calcula(orcamento)
print(imposto_calculado)
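# Hedged sketch of a decorator-pattern tax class assumed from the `impostos`
# module (illustrative only -- the real classes and rates live in impostos.py):
#
#   class ISS(object):
#       def __init__(self, outro_imposto=None):
#           self.outro = outro_imposto                 # the wrapped tax, if any
#       def calcula(self, orcamento):
#           extra = self.outro.calcula(orcamento) if self.outro else 0
#           return orcamento.valor * 0.1 + extra       # 10% is an assumed rate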
if __name__ == '__main__':
from orcamento import Orcamento, Item
calculador = Calculador_de_Impostos()
orcamento = Orcamento()
orcamento.adiciona_item(Item('ITEM - 1', 100))
orcamento.adiciona_item(Item('ITEM - 2', 50))
orcamento.adiciona_item(Item('ITEM - 3', 400))
    print('ISS and ICMS')
calculador.realiza_calculo(orcamento, ISS())
calculador.realiza_calculo(orcamento, ICMS())
    print('ISS with ICMS')
calculador.realiza_calculo(orcamento, ISS(ICMS()))
    print('PIS and COFINS')
calculador.realiza_calculo(orcamento, PIS())
calculador.realiza_calculo(orcamento, COFINS())
    print('PIS with COFINS')
calculador.realiza_calculo(orcamento, PIS(COFINS())) | UTF-8 | Python | false | false | 939 | py | 3 | calculador_de_impostos.py | 2 | 0.681576 | 0.669862 | 0 | 34 | 26.647059 | 56 |
liuyifly06/bubblecount | 1,322,849,968,311 | c0799a9b88d605e9960aa3d6804a7fff304ca64c | a73d935c819353ae57e2396bbec09b22e8e246f2 | /bubblecount/__init__.py | f93f9ddeeda2cadfa0d1c6457f400a9ae7552151 | []
| no_license | https://github.com/liuyifly06/bubblecount | 5473208121ad0b1a11b72cb2137cbab07f72e2e5 | 823dd9d26ec5046784e98a7d7de4679422db3dbf | refs/heads/master | 2020-04-10T15:37:10.773605 | 2016-05-14T18:47:12 | 2016-05-14T18:47:12 | 51,945,699 | 1 | 0 | null | false | 2016-05-19T15:54:48 | 2016-02-17T18:36:00 | 2016-05-05T18:07:40 | 2016-05-19T15:53:45 | 367,930 | 0 | 1 | 3 | Python | null | null | from preprocess import *
from curvature import *
from benchmark import *
| UTF-8 | Python | false | false | 73 | py | 57 | __init__.py | 31 | 0.794521 | 0.794521 | 0 | 3 | 23.333333 | 24 |
AMcManigal/openapi-core | 8,358,006,368,996 | 7f29859831e173b7df973c4e379233494a2d167d | 842cf533f5eb69bc25ca0e6787b20748d642af4c | /openapi_core/responses.py | 5fbaff208554bb27d0cd51ff47c3344ff5d7f478 | [
"BSD-3-Clause"
]
| permissive | https://github.com/AMcManigal/openapi-core | 4a31d9547980d3d5ac9f8267fc0a5ae7249f8744 | 84d36e33c5d55f7466f1a49bf0c5ff8c3d133572 | refs/heads/master | 2020-03-08T10:14:06.192628 | 2018-04-09T14:30:41 | 2018-04-09T14:30:41 | 128,067,360 | 0 | 0 | null | true | 2018-04-04T13:38:58 | 2018-04-04T13:38:58 | 2018-04-04T10:54:05 | 2018-04-04T10:54:15 | 98 | 0 | 0 | 0 | null | false | null | """OpenAPI core responses module"""
from functools import lru_cache
from six import iteritems
from openapi_core.exceptions import InvalidContentType
from openapi_core.media_types import MediaTypeGenerator
from openapi_core.parameters import ParametersGenerator
class Response(object):
def __init__(
self, http_status, description, headers=None, content=None,
links=None):
self.http_status = http_status
self.description = description
self.headers = headers and dict(headers) or {}
self.content = content and dict(content) or {}
self.links = links and dict(links) or {}
def __getitem__(self, mimetype):
try:
return self.content[mimetype]
except KeyError:
raise InvalidContentType(
"Invalid mime type `{0}`".format(mimetype))
class ResponsesGenerator(object):
def __init__(self, dereferencer, schemas_registry):
self.dereferencer = dereferencer
self.schemas_registry = schemas_registry
def generate(self, responses):
for http_status, response in iteritems(responses):
response_deref = self.dereferencer.dereference(response)
description = response_deref['description']
headers = response_deref.get('headers')
content = response_deref.get('content')
media_types = None
if content:
media_types = self.media_types_generator.generate(content)
parameters = None
if headers:
parameters = self.parameters_generator.generate(headers)
yield http_status, Response(
http_status, description,
content=media_types, headers=parameters)
@property
@lru_cache()
def media_types_generator(self):
return MediaTypeGenerator(self.dereferencer, self.schemas_registry)
@property
@lru_cache()
def parameters_generator(self):
return ParametersGenerator(self.dereferencer, self.schemas_registry)
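# Hedged usage sketch (illustrative names -- the dereferencer and schema
# registry normally come from openapi_core's spec-loading machinery, not
# from this module):
#
#   generator = ResponsesGenerator(dereferencer, schemas_registry)
#   responses = dict(generator.generate(operation_spec['responses']))
#   media_type = responses['200']['application/json']   # via Response.__getitem__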
| UTF-8 | Python | false | false | 2,055 | py | 17 | responses.py | 16 | 0.646229 | 0.645742 | 0 | 63 | 31.619048 | 76 |
EireneX/EireneX.github.io | 10,926,396,836,568 | fb66f80a3420dd4348261dcf9851a4fff35c4d2f | 1c10cd5c3bfa463e5a0b745b9d96ee137b9b2ad6 | /freeze.py | 000ebebc652f2c888321136e91b51f9eedcf677d | [
"WTFPL"
]
| permissive | https://github.com/EireneX/EireneX.github.io | 9a143f4367a887f22d2c0150639d7e66ba1e9d70 | f76a3316a1c2036638e1fa080d80bd2c994b645b | refs/heads/master | 2021-01-11T14:17:16.190647 | 2017-03-17T08:45:22 | 2017-03-17T08:45:22 | 81,306,540 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.insert(0, 'project')
from project import main
if __name__ == '__main__':
main.freezer.freeze()
| UTF-8 | Python | false | false | 121 | py | 1 | freeze.py | 1 | 0.644628 | 0.636364 | 0 | 7 | 16.285714 | 29 |
4dw1tz/Hangman-Game | 9,569,187,153,477 | ab24b070f65b7526dcc7cb849d91561a3d287fcc | 1bad4cec52e5625f0f903a5d610955548c4b493d | /Hangman Game.py | ac9a7f8a32edf40c280928ec34860224424d0a05 | []
| no_license | https://github.com/4dw1tz/Hangman-Game | 55a8e1a08b3e9b9cfbebba847b8b5caaf1c5082c | f3447e5e207a6275e5489e92794e0226b99231c4 | refs/heads/main | 2023-06-01T03:31:54.177195 | 2021-06-24T13:36:05 | 2021-06-24T13:36:05 | 379,931,884 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Hangman Game
from tkinter import *
import random
from turtle import *
speed(0)  # fastest; turtle.speed() only accepts 0-10, so the original 11 already fell back to 0
def Draw(Lives):
global Word
penup()
width(10)
if Lives==10:
setpos(0,-150)
pendown()
forward(150)
elif Lives==9:
setpos(150,-150)
left(90)
pendown()
forward(300)
elif Lives==8:
setpos(150, 150)
left(90)
pendown()
forward(100)
elif Lives==7:
setpos(50, 150)
left(90)
pendown()
forward(50)
elif Lives==6:
setpos(50, 100)
right(90)
fillcolor("red")
begin_fill()
circle(25)
end_fill()
left(90)
elif Lives==5:
setpos(50,50)
pendown()
color("red")
forward(100)
elif Lives==4:
setpos(50,35)
pendown()
left(45)
forward(50)
elif Lives==3:
setpos(50,35)
pendown()
right(90)
forward(50)
left(45)
elif Lives==2:
setpos(50,-50)
pendown()
left(45)
forward(75)
elif Lives==1:
setpos(50,-50)
pendown()
right(90)
forward(75)
left(45)
elif Lives==0:
setpos(40,75)
color("black")
pendown()
write("X")
penup()
setpos(60,75)
pendown()
write("X")
penup()
setpos(60,60)
pendown()
width(5)
right(180)
circle(10,180)
Main.config(text="You were HUNG!!!", bg="red")
Display.config(text=Word, bg="red")
EnterB['state']='disabled'
root.mainloop()
#Setting up the main window
root=Screen()
root.setup(width=500, height=500)
root.title("Hangman by Witty_Coding")
#Bank of words to be used
File=open("nounlist.txt", "r")
List=File.readlines()
Main=Label(text="Guess a Letter", font="ArielBold 25", bg="light green")
Main.pack()
#Random word chosen
Word=random.choice(List)
Word=Word.replace("\n", "")
#print(Word)
global Lives
Lives=11
Output=[]
Output.extend("_"*len(Word))
def Play():
Enter.delete(0, END)
String=((str(Output).replace("', '"," ")).replace("['", "")).replace("']", "")
Display.config(text=String)
if String.replace(" ", "")==Word:
Main.config(text="Weldone! You WIN!!!")
EnterB['state']='disabled'
root.mainloop()
def Pressed():
global Lives
Guess=Enter.get()
if len(Guess)!=1:
Main.config(text="Please guess 1 character!", bg="red")
Play()
else:
Main.config(text="Guess a Letter", bg="light green")
Found=False
pos=0
for Letter in Word:
if Guess==Letter:
Found=True
Output[pos]=Guess
pos+=1
if Found==False:
Lives-=1
penup()
setpos(-100, Lives*25-150)
pendown()
write(Guess, font=("Ariel",20,"normal"))
Draw(Lives)
Play()
String=((str(Output).replace("', '"," ")).replace("['", "")).replace("']", "")
Display=Label(text=String, font="ArielBold 30", bg="light green")
Display.pack()
Enter=Entry()
Enter.pack()
EnterB=Button(text="Enter", height=2, width=10, command=Pressed)
EnterB.pack()
Play()
root.mainloop()
| UTF-8 | Python | false | false | 3,463 | py | 2 | Hangman Game.py | 1 | 0.494369 | 0.448166 | 0 | 147 | 21.557823 | 82 |
yuseungwoo/baekjoon | 15,599,321,226,628 | b5e656248463f17d5cad5b746c9c6c3f7154d0ca | 8a102033a266d39128e4b64aa0780cf67055e196 | /2743.py | c899386d39a237c825ca8a772a33b80fc1d21d4f | []
| no_license | https://github.com/yuseungwoo/baekjoon | 4dec0798b8689b9378121b9d178713c9cf14a53f | 099031e2c4401e27edcdc05bd6c9e6a558b09bb9 | refs/heads/master | 2020-09-03T15:25:40.764723 | 2018-10-08T02:35:27 | 2018-10-08T02:35:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
print(len(input()))
| UTF-8 | Python | false | false | 38 | py | 460 | 2743.py | 459 | 0.605263 | 0.578947 | 0 | 3 | 11.333333 | 19 |
MrVoid918/GAN | 10,058,813,433,165 | c813fab2c9a689a0b98d9750eca93fb55796be95 | 179cf3133d0710a4fa660c030e7dcf20c8bdd408 | /GAN/utils.py | 14a1294e9613a3eb8e1776c4b18da72e2c5ab654 | [
"MIT"
]
| permissive | https://github.com/MrVoid918/GAN | c638a3000a3812880c913261745b2143d793db7e | e36bba650ebe4e36a877d6b0358f8be4184920cd | refs/heads/master | 2023-01-01T03:26:28.016799 | 2020-09-23T10:24:36 | 2020-09-23T10:24:36 | 297,932,326 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import os
def reparamaterize(mu : torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    std = torch.exp(0.5 * logvar)  # was 'var', a NameError: the parameter is named logvar
eps = torch.randn_like(std)
return eps * std + mu
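# Hedged usage sketch (VAE-style sampling; the encoder and shapes below are
# assumptions, not part of this module):
#
#   mu, logvar = encoder(x)            # e.g. two [batch, latent_dim] tensors
#   z = reparamaterize(mu, logvar)     # differentiable sample from N(mu, sigma^2)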
def save_state(save_dir : str, epoch : int, G, D):
G_path = os.path.join(save_dir, "{}_G.pth".format(epoch))
D_path = os.path.join(save_dir, "{}_D.pth".format(epoch))
torch.save(G.state_dict(), G_path)
torch.save(D.state_dict(), D_path) | UTF-8 | Python | false | false | 451 | py | 9 | utils.py | 9 | 0.62306 | 0.618625 | 0 | 13 | 32.846154 | 76 |
A-Georgiou/Flag-Detector | 11,862,699,707,089 | 88430619b166ba8cb5a5c9154869c4e74eb117e3 | 9ba3259efefd8984fbb7d37fa008322b6299be68 | /flagDetector.py | eec597fc872b698fea8b5e7857436bead3af18d9 | []
| no_license | https://github.com/A-Georgiou/Flag-Detector | b9ea6881529f9ddd29fb0ff430f6d73cc102b637 | 5a5d2c12ccb6c6569d3f09fc8038e2708f81d557 | refs/heads/master | 2023-03-05T10:03:41.232660 | 2021-02-18T22:01:07 | 2021-02-18T22:01:07 | 340,182,391 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
import pyautogui
import cv2
import os
from skimage.metrics import structural_similarity as compare_ssim
import numpy as np
"""
Flag Detector - Click and Drag
"""
class Application():
def __init__(self, master):
self.master = master
self.rect = None
self.x = self.y = 0
self.start_x = None
self.start_y = None
self.curX = None
self.curY = None
self.first_click = None
root.attributes("-transparent", "blue")
root.geometry('500x200') # set new geometry
root.title('Flag Detector')
self.menu_frame = Frame(master, bg="blue")
self.menu_frame.pack(fill=BOTH, expand=YES)
self.buttonBar = Frame(self.menu_frame,bg="white")
self.buttonBar.pack(fill=BOTH,expand=YES)
self.snipButton = Button(self.buttonBar, width=10, command=self.createScreenCanvas, background="purple", text="Scan Flag", font=("Courier", 16))
self.snipButton.pack(expand=YES)
self.master_screen = Toplevel(root)
self.master_screen.withdraw()
self.master_screen.attributes("-transparent", "blue")
self.picture_frame = Frame(self.master_screen, background = "blue")
self.picture_frame.pack(fill=BOTH, expand=YES)
self.FRAME = Label(self.menu_frame, text="", bg="white", fg="black", font=("Courier", 16))
self.FRAME.pack(fill=BOTH, expand=YES)
self.flags = self.generateFlags()
#Imports and converts all images into array
def generateFlags(self):
flags = []
full_path = os.path.abspath("Flags")
directory = os.fsencode(full_path)
for file in os.listdir(directory):
filename = os.fsdecode(file)
filepath = "Flags/" + filename
image2 = cv2.imread(filepath)
flags.append([image2, filename])
print('flags generated')
return flags
"""
Function calculateDifference
Creates RGB Histogram and creates probability prediction score from the comparison between the screenshot and the flags
Uses structural similarity index to detect the difference between each flag and the screenshot
Multiplied by 1/d (due to Bhattacharyya comparison) to give an overall score for each flag
"""
def calculateDifference(self, image):
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # NOTE: cv2.resize takes (width, height); the screenshot is scaled to
        # a fixed width of 150 px while preserving its aspect ratio. The
        # original variable names had width and height swapped.
        target_width = 150
        scale_percent = (target_width / image_gray.shape[1]) * 100
        target_height = int(image_gray.shape[0] * scale_percent / 100)
        image_gray = cv2.resize(image_gray, (target_width, target_height))
minFlag = ""
minFlagVal = 0
for file, filename in self.flags:
image2 = file
image2_gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
#image2_gray = image2
(H,W) = image_gray.shape
image2_gray = cv2.resize(image2_gray, (W,H))
(score, diff) = compare_ssim(image_gray, image2_gray, full=True)
histr_screen = cv2.calcHist([image_gray],[0],None,[256],[0,256])
histr_screen = cv2.normalize(histr_screen, histr_screen).flatten()
histr_flag = cv2.calcHist([image2],[0],None,[256],[0,256])
histr_flag = cv2.normalize(histr_flag, histr_flag).flatten()
d = cv2.compareHist(histr_screen, histr_flag, cv2.HISTCMP_BHATTACHARYYA)
overall_score = score * (1/d)
if overall_score > minFlagVal:
minFlagVal = overall_score
minFlag = filename
self.FRAME['text'] = minFlag.replace(".png", "")
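    # Hedged scoring note: compare_ssim returns a structural score in [-1, 1]
    # and the Bhattacharyya distance d lies in [0, 1] (0 = identical
    # histograms), so score * (1/d) rewards flags that match in both shape
    # and intensity distribution -- e.g. ssim 0.6 with d 0.2 scores 3.0.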
def takeBoundedScreenShot(self, x1, y1, x2, y2):
im = pyautogui.screenshot(region=(x1, y1, x2, y2))
self.master_screen.attributes('-alpha', 0)
open_cv_image = np.array(im)
open_cv_image = open_cv_image[:, :, ::-1].copy()
self.calculateDifference(open_cv_image)
#Draw canvas on selection
def createScreenCanvas(self):
self.master_screen.deiconify()
root.withdraw()
self.screenCanvas = Canvas(self.picture_frame, cursor="cross", bg="grey")
self.screenCanvas.pack(fill=BOTH, expand=YES)
self.screenCanvas.bind("<ButtonPress-1>", self.on_button_press)
self.screenCanvas.bind("<Motion>", self.on_motion)
self.screenCanvas.bind("<ButtonRelease-1>", self.on_button_release)
self.master_screen.attributes('-fullscreen', True)
self.master_screen.attributes('-alpha', 0.15)
self.master_screen.lift()
self.master_screen.attributes("-topmost", True)
def on_button_press(self, event):
self.first_click = True
self.start_x = self.screenCanvas.canvasx(event.x)
self.start_y = self.screenCanvas.canvasy(event.y)
self.rect = self.screenCanvas.create_rectangle(self.x, self.y, 1, 1, outline='red', width=3, fill="blue")
def on_button_release(self, event):
if self.start_x <= self.curX and self.start_y <= self.curY:
self.takeBoundedScreenShot(self.start_x, self.start_y, self.curX - self.start_x, self.curY - self.start_y)
elif self.start_x >= self.curX and self.start_y <= self.curY:
self.takeBoundedScreenShot(self.curX, self.start_y, self.start_x - self.curX, self.curY - self.start_y)
elif self.start_x <= self.curX and self.start_y >= self.curY:
self.takeBoundedScreenShot(self.start_x, self.curY, self.curX - self.start_x, self.start_y - self.curY)
elif self.start_x >= self.curX and self.start_y >= self.curY:
self.takeBoundedScreenShot(self.curX, self.curY, self.start_x - self.curX, self.start_y - self.curY)
self.screenCanvas.destroy()
self.master_screen.withdraw()
root.deiconify()
return event
#When dragging mouse across screen
def on_motion(self, event):
if self.first_click != None:
self.curX, self.curY = (event.x, event.y)
self.screenCanvas.coords(self.rect, self.start_x, self.start_y, self.curX, self.curY)
if __name__ == '__main__':
root = Tk()
app = Application(root)
root.mainloop()
| UTF-8 | Python | false | false | 6,349 | py | 2 | flagDetector.py | 1 | 0.61112 | 0.597259 | 0 | 153 | 40.470588 | 152 |
michaelliqx/AlphaGoZero | 8,615,704,406,650 | 80c686827769f583a020e3f9a68f9c41637f85bb | a42a49ffe7437f7afef72a0f52d88d5df7697380 | /Agent.py | c318821696710253c08df753b469b1db0ffe1278 | []
| no_license | https://github.com/michaelliqx/AlphaGoZero | f5ad4752af9137791f05a0af8112a9725bd94bff | c9b087e25980f9673ead8702a3084d584cd403af | refs/heads/master | 2020-05-26T20:17:11.273972 | 2019-05-24T06:00:10 | 2019-05-24T06:00:10 | 188,360,384 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import random
import MTTS as mc
import time
from game import GameState
import tensorflow as tf
import keras
import config
class User:
def __init__(self,name,state_size,action_size):
self.name = name
self.state_size = state_size
self.action_size = action_size
def act(self):
        action = int(input('Enter your chosen action: '))  # cast: input() returns a string, which cannot index the pi array
pi = np.zeros(self.action_size)
pi[action] = 1
value = None
NN_value = None
return (action, pi, value, NN_value)
class Agent:
def __init__(self,name,state_size,action_size,model):
self.name = name
self.state_size = state_size
self.action_size = action_size
self.model = model
    def simulate(self):
        # Body missing upstream; a placeholder keeps the module importable.
        # The MCTS rollout presumably belongs here, driven by the MTTS module.
        raise NotImplementedError("MCTS simulation is not implemented yet")
| UTF-8 | Python | false | false | 777 | py | 7 | Agent.py | 5 | 0.610039 | 0.608752 | 0 | 37 | 19.648649 | 57 |
etalab-ia/ami-ia-ineris | 3,891,240,417,513 | 725d1b00c603214353594c067b61c3f5a12eb06a | baae0eb93b7a08aece569c44f65ae340f12352f6 | /heka/frontend/src/Besoin3ech/layout.py | cf1f49336c94fd3a1b96ff55e4321ed4ee42305e | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-2-Clause"
]
| permissive | https://github.com/etalab-ia/ami-ia-ineris | 935e908e39a657eade17320cf959d1fe6abaae0e | 94454cbd02434ce0f62a24e47f6ac7a6cdba049a | refs/heads/master | 2023-01-19T00:19:28.425042 | 2023-01-18T17:09:16 | 2023-01-18T17:09:16 | 239,794,042 | 0 | 0 | NOASSERTION | false | 2023-01-18T17:12:38 | 2020-02-11T15:19:49 | 2023-01-18T17:09:47 | 2023-01-18T17:12:37 | 60,468 | 0 | 0 | 17 | Jupyter Notebook | false | false | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import custom_components as cc
from Besoin3ech.helpers import *
import dash_table
from datetime import datetime as dt
from datetime import date
layout = html.Div(
[
        html.H1('Analyzed samples'),
html.Div(id='non-displayed-sample'),
dbc.Row(
[
dbc.Col(
[
dcc.DatePickerRange(
id='my-date-picker-range-ech',
min_date_allowed=dt(2011, 1, 1),
display_format='DD-MM-YYYY',
initial_visible_month=dt(2012, 1, 1),
# start_date=dt(2018, 1, 1).date(),
# end_date=dt(2018, 1, 31).date(),
clearable=True,
updatemode='bothdates'
),
],width=8),
dbc.Col(
[
html.Button(
                            'Apply filter',
id='launch-analysis-ech'
),
html.Div(id='test', style={'display': 'none'}),
],width=4),
]
),
html.Br(),
dbc.Row([
dbc.Col([
            html.H4('Latest samples in the database'),
html.Div([
dash_table.DataTable(
id='treated-samples',
columns=[{'name': 'Charging ...', 'id': 'Charging ...'}],
filter_action="native",
sort_action="native",
sort_mode="multi",
selected_rows=[],
page_action="native",
page_size= 10
),
]),
], width=8),
dbc.Col([
html.Br(),
html.Div(id='buttons-sample', children=[
html.Button(
                    'Blacklist',
id='blacklist-sample',
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
),
html.Div(id='output-data-blacklist')
]),
], width=4)
]),
dbc.Row([
dbc.Col([
            html.H4('Blacklisted samples'),
html.Div([
dash_table.DataTable(
id='black-listed-samples',
columns=[{'name': 'Charging ...', 'id': 'Charging ...'}],
filter_action="native",
sort_action="native",
sort_mode="multi",
selected_rows=[],
page_action="native",
page_size= 10
),
]),
], width=8),
dbc.Col([
html.Br(),
html.Div(id='buttons-blacklist-sample', children=[
html.Button(
                    'Whitelist',
id='white-list-sample',
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
),
html.Div(id='output-data-whitelist')
]),
], width=4)
]),
dbc.Row([
dbc.Col(
[
                html.H4('Latest launched tasks'),
cc.ChartCard("launched-analysis", "Tâches lancées"),
],width=12),
]),
# html.Div(id='selected-row-ids-samples'),
html.Div(id='intermediate-value-sample', style={'display': 'none'}),
html.Div(id='intermediate-value-sample-blacklisted', children=[html.Button('hidden', id='hidden-button')], style={'display': 'none'}),
html.Div(id='intermediate-value-sample-blacklisted-1', children=[html.Button('hidden', id='hidden-button-1')], style={'display': 'none'}),
    # Duplicate hidden Divs removed: the ids 'intermediate-value-sample-blacklisted'
    # and 'test' were declared twice, and Dash requires component ids to be unique.
]) | UTF-8 | Python | false | false | 5,101 | py | 102 | layout.py | 46 | 0.365011 | 0.352445 | 0 | 142 | 34.873239 | 146 |
jakerdou/My-Finance-App | 1,864,015,826,947 | 091e484ccc3de22f88dfb5d3fb1f11000eddd67d | 438bcc145dd8b68eee1525759696c48adb19ba24 | /main.py | 2be6763f5a793a7dc54618dd5ccd234f4c3eadbe | []
| no_license | https://github.com/jakerdou/My-Finance-App | 63c58a1b8783a31c69c519c50294256fa387276b | f4125ec15668cecfddfabf43c6e93f09cf1b1277 | refs/heads/master | 2020-12-14T16:36:59.208968 | 2020-01-19T20:44:31 | 2020-01-19T20:44:31 | 234,809,616 | 1 | 0 | null | false | 2020-01-19T20:44:37 | 2020-01-18T23:02:39 | 2020-01-18T23:11:32 | 2020-01-19T20:44:32 | 3 | 0 | 0 | 0 | Python | false | false | import datetime
import AppUser as au
import PaymentMethod as pm
import Transaction as trans
import TransCategory as transCat
#*************************FIXME: get rid of FIXMEs in all files**************************************
userList = []
selection = ""
currUser = 0 #FIXME: need to get this to actually keep track of current user
stars = "***********************************************************"
#FIXME: get rid of this, just using it so i dont have to type it each time
me = au.AppUser()
me.name = "James Robinson"
me.email = "jakerdou@tamu.edu"
myCash = pm.PaymentMethod()
myCash.name = "Cash"
myCash.balance = 450.0
myFood = transCat.TransCategory()
myFood.name = "Food"
myWhata = trans.Transaction()
myWhata.description = "whataburger"
myWhata.amount = 4.20
myWhata.transCategory = myFood
userList.append(me)
me.pmList.append(myCash)
myCash.AddCategory(myFood)
myCash.AddTransaction(myFood, myWhata)
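# Hedged sketch of what PaymentMethod.AddTransaction presumably does (the real
# class lives in PaymentMethod.py, which is not shown here; names other than
# catTransList, used below, are assumptions):
#
#   def AddTransaction(self, category, transaction):
#       category.catTransList.append(transaction)   # catTransList is read later
#       self.balance -= transaction.amount          # expenses reduce the balance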
#FIXME: this
while(not (selection == "0")):
print("\n*************************MAIN MENU*************************")
print("* Current User: " + userList[currUser].name + " *")
print("* *")
print("* Enter 0 for: exit *")
print("* Enter 1 for: add user *")
print("* Enter 2 for: add payment method *")
print("* Enter 3 for: add category to payment method *")
print("* Enter 4 for: add transaction to payment method *")
print("* Enter 5 for: view payment methods *")
print("* Enter 6 for: view categories *")
print("* Enter 7 for: view transactions *")
print("* Enter 8 for: switch to different user *")
print(stars + "\n")
selection = input("Enter your selection: ")
print(stars + "\n")
if(selection == "1"):
print()
newUser = au.AppUser()
newUser.name = input("What is the user's name? ")
newUser.email = input("What is the user's email? ")
userList.append(newUser)
print("\nUser Added!\n" + stars + "\n")
if(selection == "2"):
if(len(userList) == 0):
print("\nYou must create a user before adding payment methods.")
print(stars + "\n")
else:
newPM = pm.PaymentMethod()
newPM.name = input("What kind of payment method? ")
newPM.balance = input("What balance is on it? ")
userList[currUser].pmList.append(newPM)
print("\nPayment Method Added!\n" + stars + "\n")
if(selection == "3"):
if(len(userList) == 0 or len(userList[currUser].pmList) == 0):
print("\nYou must create a user with payment methods before adding categories.")
print(stars + "\n")
else:
print("Payment methods for this user: ")
for i in range(len(userList[currUser].pmList)):
print("Enter " + str(i) + " for: " + userList[currUser].pmList[i].name)
pmSelected = int(input("\nWhich payment method would you like to add a category to? "))
#FIXME: need to validate input
            newCat = transCat.TransCategory()  # instantiate: the original bound the class itself, so attributes leaked across categories
newCat.name = input("What is the name of the category? ")
expenseYorN = input("Is the category an expense? (Enter 'y' or 'n') ")
if(expenseYorN == "y"):
newCat.isExpense = True
else:
newCat.isExpense = False
userList[currUser].pmList[pmSelected].categoryList.append(newCat)
print("\nCategory Added!\n" + stars + "\n")
if(selection == "4"):
if(len(userList) == 0 or len(userList[currUser].pmList) == 0):
print("\nYou must create a user with payment methods before adding transactions.")
print(stars + "\n")
else:
print("Payment methods for this user: ")
for i in range(len(userList[currUser].pmList)):
print("Enter " + str(i) + " for: " + userList[currUser].pmList[i].name)
pmSelectedIndex = int(input("\nWhich payment method would you like to add a transaction to? "))
pmSelected = userList[currUser].pmList[pmSelectedIndex]
#FIXME: need to validate input
#check if this payment method has categories to add to
if(len(pmSelected.categoryList) == 0):
print("\nYou must first create categories for this payment method.")
print(stars + "\n")
else:
newTrans = trans.Transaction()
#FIXME: need to add date
newTrans.description = input("Enter a description of the transaction: ")
newTrans.amount = float(input("Enter the amount of the transaction: ")) #FIXME: need to validate input
#put transaction in category
print("\nCategories for this payment method:")
for i in range(len(pmSelected.categoryList)):
print("Enter " + str(i) + " for: " + pmSelected.categoryList[i].name)
catSelectedIndex = int(input("\nWhich category does the transaction fit into? "))
catSelected = pmSelected.categoryList[catSelectedIndex]
                newTrans.transCategory = catSelected  # unified with the 'transCategory' attribute set on myWhata above
#add transaction to list
pmSelected.AddTransaction(catSelected, newTrans)
#FIXME: add "x amount has been added/subtracted from balance of y"
print("\nTransaction Added!\n" + stars + "\n")
if(selection == "5"):
#check if they have payment methods
print("Payment Methods for " + userList[currUser].name + "\n")
for i in userList[currUser].pmList:
i.printPM()
print()
print(stars + "\n")
if(selection == "6"):
if(len(userList) == 0 or len(userList[currUser].pmList) == 0):
print("\nYou must create a user with payment methods before viewing categories.")
print(stars + "\n")
else:
print("Payment methods for " + userList[currUser].name + "\n")
for i in range(len(userList[currUser].pmList)):
print("Enter " + str(i) + " for: " + userList[currUser].pmList[i].name)
pmSelectedIndex = int(input("\nWhich payment method would you like to view the categories of? "))
#FIXME: need to validate input
if(len(userList[currUser].pmList[pmSelectedIndex].categoryList) == 0):
print("You must add categories to this payment method before viewing them.")
print(stars + "\n")
else:
print("Categories for " + userList[currUser].pmList[pmSelectedIndex].name + "\n")
for i in userList[currUser].pmList[pmSelectedIndex].categoryList:
i.printCat()
print()
print(stars + "\n")
if(selection == "7"):
if(len(userList) == 0 or len(userList[currUser].pmList) == 0):
print("\nYou must create a user with payment methods before viewing transactions.")
print(stars + "\n")
else:
print("Payment methods for this user: ")
for i in range(len(userList[currUser].pmList)):
print("Enter " + str(i) + " for: " + userList[currUser].pmList[i].name)
pmSelectedIndex = int(input("\nWhich payment method would you like to view the transactions of? "))
pmSelected = userList[currUser].pmList[pmSelectedIndex]
#FIXME: need to validate input
#check if this payment method has categories view
if(len(pmSelected.categoryList) == 0):
print("\nYou must first create categories for this payment method.")
print(stars + "\n")
else:
print("\nCategories for this payment method:")
for i in range(len(pmSelected.categoryList)):
print("Enter " + str(i) + " for: " + pmSelected.categoryList[i].name)
#FIXME: need to add feature that lets you see transactions of all categories
catSelectedIndex = int(input("\nWhich category would you like to view the transactions of? "))
catSelected = pmSelected.categoryList[catSelectedIndex]
if(len(catSelected.catTransList) == 0):
print("\nYou must first add transactions before you can view them.")
print(stars + "\n")
else:
print("TRANSACTIONS\n")
for i in catSelected.catTransList:
i.printTrans()
print()
print(stars + "\n")
if(selection == "8"):
print("this selection is not working yet\n" + stars + "\n")
#QUESTIONS
#Is python the right language for iOS dev?
#Do I need setters and getters for variables?
#Do I need to make variables private?
| UTF-8 | Python | false | false | 9,360 | py | 6 | main.py | 5 | 0.539316 | 0.53515 | 0 | 219 | 40.739726 | 118 |
DiptoChakrabarty/Net_Sec | 14,422,500,186,329 | d1c2f0926f20fab1dbe148ca378db44d35268529 | 3e92b0676c96745e76318701ab5a60515c50dc46 | /ftpcrack.py | d9689270646fca114f3c8db572e86fc3a013b4db | [
"MIT"
]
| permissive | https://github.com/DiptoChakrabarty/Net_Sec | fb323dd01f73d4559c41554491211faf4a0f01ed | dc01627724f55e225367ab0d232b227989e78e92 | refs/heads/master | 2021-01-01T00:51:32.352413 | 2020-02-17T21:19:25 | 2020-02-17T21:19:25 | 239,105,070 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import re
import sys
import itertools
#Connect to ftp server
def connection(ip,user,passwd):
#sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock= socket.socket()
print("Connecting to ip " + ip +"with user " + user + " password " +passwd)
sock.connect((ip,21))
print("Connected")
data=sock.recv(4096).decode()
print(data)
sock.send(("Username:"+user).encode() )
print("Username")
print(data)
data=sock.recv(4096).decode()
sock.send(('Password:' + passwd).encode())
print("Password")
print(data)
data=socket.recv(4096).decode()
sock.send(('Quit').encode())
print("Quit")
sock.close()
return data
alpha="aqzwsxedcrfvtgbyhnujmikolp"
#alpha=list(i for i in alpha)
user="chuck"
passwd=["red","blue","green","redhat"]
ip="192.168.43.3"
#for i in passwd:
#print(connection(ip,user,"redhat"))
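# Hedged brute-force sketch using the wordlist above (authorized lab use only):
#
#   for candidate in passwd:
#       reply = connection(ip, user, candidate)
#       if reply.startswith('230'):        # FTP code 230 = login successful
#           print('found password:', candidate)
#           break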
from ftplib import FTP
#domain name or server ip:
ftp = FTP(ip)
ftp.login(user= user, passwd = 'redhat') | UTF-8 | Python | false | false | 1,017 | py | 2 | ftpcrack.py | 2 | 0.643068 | 0.620452 | 0 | 58 | 16.551724 | 80 |
farhananwari07/flask-image-processing | 8,443,905,752,240 | aaf5216738f677e21d3d927ee80c4b4ca50f9b92 | 3ea104409b5ab5f1d1928af7d31b4a58b11d220a | /venv/Lib/site-packages/networkx/readwrite/tests/test_text.py | 5f82c7b2126918dbc13232acf252566144d3d23d | [
"Apache-2.0"
]
| permissive | https://github.com/farhananwari07/flask-image-processing | 0103ab0600995a760e27ffc644ffb313de4eaade | a4a4ad717ffd074afbe31cbf8803060764034375 | refs/heads/main | 2023-09-02T01:21:27.328049 | 2021-11-10T07:58:17 | 2021-11-10T07:58:17 | 425,517,466 | 0 | 0 | Apache-2.0 | true | 2021-11-07T13:55:56 | 2021-11-07T13:55:56 | 2021-11-03T07:30:26 | 2021-11-03T07:30:23 | 135,012 | 0 | 0 | 0 | null | false | false | import pytest
import networkx as nx
from textwrap import dedent
def test_directed_tree_str():
# Create a directed forest with labels
graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
for node in graph.nodes:
graph.nodes[node]["label"] = "node_" + chr(ord("a") + node)
node_target = dedent(
"""
╙── 0
├─╼ 1
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
└─╼ 6
"""
).strip()
label_target = dedent(
"""
╙── node_a
├─╼ node_b
│ ├─╼ node_d
│ └─╼ node_e
└─╼ node_c
├─╼ node_f
└─╼ node_g
"""
).strip()
# Basic node case
ret = nx.forest_str(graph, with_labels=False)
print(ret)
assert ret == node_target
# Basic label case
ret = nx.forest_str(graph, with_labels=True)
print(ret)
assert ret == label_target
# Custom write function case
lines = []
ret = nx.forest_str(graph, write=lines.append, with_labels=False)
assert ret is None
assert lines == node_target.split("\n")
# Smoke test to ensure passing the print function works. To properly test
# this case we would need to capture stdout. (for potential reference
# implementation see :class:`ubelt.util_stream.CaptureStdout`)
ret = nx.forest_str(graph, write=print)
assert ret is None
def test_empty_graph():
assert nx.forest_str(nx.DiGraph()) == "╙"
assert nx.forest_str(nx.Graph()) == "╙"
def test_directed_multi_tree_forest():
tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
forest = nx.disjoint_union_all([tree1, tree2])
ret = nx.forest_str(forest)
print(ret)
target = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ │ ├─╼ 3
╎ │ └─╼ 4
╎ └─╼ 2
╎ ├─╼ 5
╎ └─╼ 6
╙── 7
├─╼ 8
│ ├─╼ 10
│ └─╼ 11
└─╼ 9
├─╼ 12
└─╼ 13
"""
).strip()
assert ret == target
tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
forest = nx.disjoint_union_all([tree1, tree2, tree3])
ret = nx.forest_str(forest, sources=[0, 14, 7])
print(ret)
target = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ │ ├─╼ 3
╎ │ └─╼ 4
╎ └─╼ 2
╎ ├─╼ 5
╎ └─╼ 6
╟── 14
╎ ├─╼ 15
╎ │ ├─╼ 17
╎ │ └─╼ 18
╎ └─╼ 16
╎ ├─╼ 19
╎ └─╼ 20
╙── 7
├─╼ 8
│ ├─╼ 10
│ └─╼ 11
└─╼ 9
├─╼ 12
└─╼ 13
"""
).strip()
assert ret == target
ret = nx.forest_str(forest, sources=[0, 14, 7], ascii_only=True)
print(ret)
target = dedent(
"""
+-- 0
: |-> 1
: | |-> 3
: | L-> 4
: L-> 2
: |-> 5
: L-> 6
+-- 14
: |-> 15
: | |-> 17
: | L-> 18
: L-> 16
: |-> 19
: L-> 20
+-- 7
|-> 8
| |-> 10
| L-> 11
L-> 9
|-> 12
L-> 13
"""
).strip()
assert ret == target
def test_undirected_multi_tree_forest():
tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes})
forest = nx.union(tree1, tree2)
ret = nx.forest_str(forest, sources=[0, 7])
print(ret)
target = dedent(
"""
╟── 0
╎ ├── 1
╎ │ ├── 3
╎ │ └── 4
╎ └── 2
╎ ├── 5
╎ └── 6
╙── 7
├── 8
│ ├── 10
│ └── 11
└── 9
├── 12
└── 13
"""
).strip()
assert ret == target
ret = nx.forest_str(forest, sources=[0, 7], ascii_only=True)
print(ret)
target = dedent(
"""
+-- 0
: |-- 1
: | |-- 3
: | L-- 4
: L-- 2
: |-- 5
: L-- 6
+-- 7
|-- 8
| |-- 10
| L-- 11
L-- 9
|-- 12
L-- 13
"""
).strip()
assert ret == target
def test_undirected_tree_str():
# Create a directed forest with labels
graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
# arbitrary starting point
nx.forest_str(graph)
node_target0 = dedent(
"""
╙── 0
├── 1
│ ├── 3
│ └── 4
└── 2
├── 5
└── 6
"""
).strip()
# defined starting point
ret = nx.forest_str(graph, sources=[0])
print(ret)
assert ret == node_target0
# defined starting point
node_target2 = dedent(
"""
╙── 2
├── 0
│ └── 1
│ ├── 3
│ └── 4
├── 5
└── 6
"""
).strip()
ret = nx.forest_str(graph, sources=[2])
print(ret)
assert ret == node_target2
def test_forest_str_errors():
ugraph = nx.complete_graph(3, create_using=nx.Graph)
with pytest.raises(nx.NetworkXNotImplemented):
nx.forest_str(ugraph)
dgraph = nx.complete_graph(3, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXNotImplemented):
nx.forest_str(dgraph)
def test_overspecified_sources():
"""
When sources are directly specified, we wont be able to determine when we
are in the last component, so there will always be a trailing, leftmost
pipe.
"""
graph = nx.disjoint_union_all(
[
nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph),
nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
]
)
# defined starting point
target1 = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ └─╼ 2
╟── 3
╎ └─╼ 4
╎ └─╼ 5
╟── 6
╎ ├─╼ 7
╎ └─╼ 8
"""
).strip()
target2 = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ └─╼ 2
╟── 3
╎ └─╼ 4
╎ └─╼ 5
╙── 6
├─╼ 7
└─╼ 8
"""
).strip()
lines = []
nx.forest_str(graph, write=lines.append, sources=graph.nodes)
got1 = chr(10).join(lines)
print("got1: ")
print(got1)
lines = []
nx.forest_str(graph, write=lines.append)
got2 = chr(10).join(lines)
print("got2: ")
print(got2)
assert got1 == target1
assert got2 == target2
| UTF-8 | Python | false | false | 7,931 | py | 67 | test_text.py | 65 | 0.405203 | 0.372826 | 0 | 315 | 21.453968 | 77 |
Torment123/iVMCL-Release | 6,107,443,545,714 | fa0f30ede9b57a4630559ee8e6848bf457dbb7a9 | dedb790038c0b52193e23916f7d363275eeb14db | /mmdetection/tools_ivmcl/publish_model.py | d36ce5e136394e1d7d058b7a4dd82ce3c82195a8 | [
"Apache-2.0"
]
| permissive | https://github.com/Torment123/iVMCL-Release | 034f6093a5e9ba003121ed58d99e693e917f11da | 5ffdd70183bc909ed98df2c6ab33478003bafa8e | refs/heads/main | 2022-12-24T23:57:28.256164 | 2020-10-08T22:39:57 | 2020-10-08T22:39:57 | 329,058,761 | 1 | 0 | null | true | 2021-01-12T17:17:20 | 2021-01-12T17:17:20 | 2020-10-08T22:43:10 | 2020-10-08T22:43:07 | 11,718 | 0 | 0 | 0 | null | false | false | import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
state_dict = checkpoint['model']
for k in list(state_dict.keys()):
# if logger is not None:
# logger.info(f'{k}')
# retain only encoder_q up to before the embedding layer
if k.startswith('module.encoder_q') and \
not k.startswith('module.encoder_q.fc'):
# remove prefix
state_dict[k[len("module.encoder_q."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
state = {
'state_dict': state_dict
}
torch.save(state, out_file)
# sha = subprocess.check_output(['sha256sum', out_file]).decode()
# if out_file.endswith('.pth'):
# out_file_name = out_file[:-4]
# else:
# out_file_name = out_file
# final_file = out_file_name + f'-{sha[:8]}.pth'
# subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,443 | py | 53 | publish_model.py | 43 | 0.606376 | 0.602911 | 0 | 48 | 29.0625 | 70 |
rojas70/parse_rcbht_data | 2,422,361,605,754 | fafed668f5f50e45bb82f37f5bb07b0ef9844658 | 840c5cbd16fcc620a92c03dcb76aab4160ec0ad4 | /my_code/generate_trainning_data_for_success_and_failure.py | f2e1eee200e4e14f2533187bf24f5d394e40250f | [
"BSD-3-Clause"
]
| permissive | https://github.com/rojas70/parse_rcbht_data | 3c91586d921270686d7b81175d79b8ec5c8b1990 | 8113e120e5953febbcabb99ab168a7d6f2147737 | refs/heads/master | 2020-05-22T02:49:52.293995 | 2017-05-03T07:53:35 | 2017-05-03T07:53:35 | 65,885,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import shutil
from copy import deepcopy
import util.output_features as output_features
import data_parser.data_folder_parser as data_folder_parser
import feature_extractor.data_feature_extractor as data_feature_extractor
import traceback,sys#,code
# Globals
global DB_PRINT
DB_PRINT=0
def main():
from inc.config import states
from inc.config import levels
from inc.config import axes
# What kind of success_strategy will you analyze
success_strategy='REAL_HIRO_ONE_SA_SUCCESS'
failure_strategy="REAL_HIRO_ONE_SA_ERROR_CHARAC"
strategy=success_strategy # default value. used in hblstates
# Folder names
data_folder_names=[] # Filtered to only take relevant folders
orig_data_folder_names=[]
# Dictionary building blocks
folder_dims={}
dict_dims={}
dict_all={}
allTrialLabels={}
# Set program paths
results_dir="../../../data_zero"
cur_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = cur_dir
os.chdir(base_dir)
# my training data
directory='my_training_data'
# What kind of data should we collect?
# - Success
# - Failure
# + Generate high level states data
# 1. Get data for success tasks for a given success_strategy
strategy=success_strategy
hlb_dir=strategy
if not os.path.exists(os.path.join(base_dir, '..', 'my_training_data', hlb_dir)):
os.makedirs(os.path.join(base_dir, '..', 'my_training_data', hlb_dir))
# Get Folder names
#data_folder_prefix = os.path.join(base_dir, '..', 'my_data', success_strategy)
data_folder_prefix = os.path.join(results_dir, strategy)
orig_data_folder_names = os.listdir(data_folder_prefix)
# Remove undesired folders
for data_folder_name in orig_data_folder_names:
data_folder_names.append(data_folder_name)
# Create a dictionary structure for all trials, RCBHT levels, and axis.
for data_folder_name in data_folder_names:
data_folder_full_path = os.path.join(data_folder_prefix, data_folder_name)
if DB_PRINT:
print data_folder_full_path
print data_folder_full_path
dict_cooked_from_folder = data_folder_parser.parse_folder(data_folder_full_path)
if dict_cooked_from_folder == None:
continue
else:
dict_all[data_folder_name]=dict_cooked_from_folder
if bool(dict_all):
success_dict_all = dict_all;
else:
raise Exception('The success dictionary dict_all is empty')
# Clear up
folder_dims={}
dict_dims={}
dict_all={}
allTrialLabels={}
data_folder_names=[]
orig_data_folder_names=[]
#-------------------------------------------------------------------------
##FAILURE ANALYSIS
#------------------------------------------------------------------------_
strategy=failure_strategy
hlb_dir=strategy
if not os.path.exists(os.path.join(base_dir, '..', 'my_training_data', hlb_dir)):
os.makedirs(os.path.join(base_dir, '..', 'my_training_data', hlb_dir))
# Read failure data
data_folder_prefix = os.path.join(results_dir, failure_strategy)
orig_data_folder_names = os.listdir(data_folder_prefix)
# Remove undesired folders
for data_folder_name in orig_data_folder_names:
data_folder_names.append(data_folder_name)
# Get full path for each folder name
for data_folder_name in data_folder_names:
data_folder_full_path = os.path.join(data_folder_prefix, data_folder_name)
if DB_PRINT:
print data_folder_full_path
# Get dictionary cooked from all folders
print data_folder_full_path
dict_cooked_from_folder = data_folder_parser.parse_folder(data_folder_full_path)
if dict_cooked_from_folder == None:
continue
else:
dict_all[data_folder_name]=dict_cooked_from_folder
# Once dict_cooked_from_folder exists, get dimensions of level/axis for each folder
if bool(dict_all):
fail_dict_all = dict_all;
else:
raise Exception('The failure dictionary dict_all is empty')
# Clear up
folder_dims={}
dict_dims={}
dict_all={}
allTrialLabels={}
data_folder_names=[]
orig_data_folder_names=[]
for level in levels:
folder_dims[level] = {}
for axis in axes:
folder_dims[level][axis]=0
#cook folder_dims for both success&fail samples
for dict_all in [success_dict_all, fail_dict_all]:
for data_folder_name in dict_all:
for level in levels:
for axis in axes:
temp = len(dict_all[data_folder_name][level][axis])
if temp > folder_dims[level][axis]:
folder_dims[level][axis] = temp
#output data for success
dict_all = success_dict_all
for data_folder_name in dict_all:
data_feature_extractor.extract_features(dict_all[data_folder_name],folder_dims)
allTrialLabels[data_folder_name]=deepcopy(dict_all[data_folder_name])
file_for_S_classification = open(os.path.join(base_dir, '..', 'my_training_data', success_strategy, 'training_set_of_success'), 'w')
output_features.output_sample_one_trial(file_for_S_classification, '1', allTrialLabels, os.path.join(base_dir,'..', 'my_training_data', success_strategy, "img_of_success"))
dict_dims={}
dict_all={}
allTrialLabels={}
data_folder_names=[]
orig_data_folder_names=[]
#output data for fail
dict_all = fail_dict_all
for data_folder_name in dict_all:
data_feature_extractor.extract_features(dict_all[data_folder_name],folder_dims)
allTrialLabels[data_folder_name]=deepcopy(dict_all[data_folder_name])
file_for_F_classification = open(os.path.join(base_dir, '..', 'my_training_data', failure_strategy, 'training_set_of_fail'), 'w')
output_features.output_sample_one_trial(file_for_F_classification, '0', allTrialLabels, os.path.join(base_dir,'..', 'my_training_data', failure_strategy, "img_of_fail"));
import pickle
pickle.dump(folder_dims, open(os.path.join(base_dir,'..', 'my_training_data', failure_strategy, "SF_layer_dims.pkl"), "wb"))
# Clear up
folder_dims={}
dict_dims={}
dict_all={}
allTrialLabels={}
data_folder_names=[]
orig_data_folder_names=[]
main();
| UTF-8 | Python | false | false | 6,521 | py | 504 | generate_trainning_data_for_success_and_failure.py | 16 | 0.622604 | 0.621837 | 0 | 184 | 34.440217 | 176 |
lanl/bueno | 14,456,859,945,016 | d0e62de9e134f07f53f33c83f6801ec3dcad5fab | 20c78276520a556752c89ec5d06b875afcc3d0c4 | /bueno/core/bueno.py | 27684c89a83149f6e22ebe200ad62f45b4b062b2 | [
"BSD-3-Clause"
]
| permissive | https://github.com/lanl/bueno | 45bc3366da2dc9f1d37591e6f1901c95e1b9c014 | 41907fa2bdd7e747713321010ad62ddedf449748 | refs/heads/master | 2023-06-08T21:45:38.269378 | 2023-05-25T22:05:00 | 2023-05-25T22:05:18 | 190,777,300 | 9 | 11 | BSD-3-Clause | false | 2022-06-28T01:40:33 | 2019-06-07T16:39:06 | 2021-12-21T04:38:19 | 2022-06-28T01:40:32 | 9,747 | 6 | 3 | 0 | Python | false | false | #
# Copyright (c) 2019-2022 Triad National Security, LLC
# All rights reserved.
#
# This file is part of the bueno project. See the LICENSE file at the
# top-level directory of this distribution for more information.
#
'''
The good stuff typically called by __main__.
'''
import argparse
import sys
import typing
from bueno import _version
from bueno.core import service
from bueno.core import utils
class ArgumentParser:
'''
bueno's argument parser.
'''
def __init__(self) -> None:
self.argp = argparse.ArgumentParser(
description=ArgumentParser._desc(),
allow_abbrev=False
)
@staticmethod
def _desc() -> str:
'''
Returns the description string for bueno.
'''
return 'Utilities for automating reproducible benchmarking.'
def _addargs(self) -> None:
self.argp.add_argument(
'-v', '--version',
help='Displays version information.',
action='version',
version=f'%(prog)s {_version.__version__}'
)
self.argp.add_argument(
'command',
# Consume the remaining arguments for command's use.
nargs=argparse.REMAINDER,
help='Specifies the command to run '
'followed by command-specific arguments.',
choices=service.Factory.available(),
action=ArgumentParser.CommandAction
)
class CommandAction(argparse.Action):
'''
Custom action class used for 'command' argument structure verification.
'''
@typing.no_type_check
def __init__(self, option_strings, dest, nargs, **kwargs):
super().__init__(option_strings, dest, nargs, **kwargs)
@typing.no_type_check
def __call__(self, parser, namespace, values, option_string=None):
if len(values) == 0:
helps = '{} requires one positional argument (none provided).'
parser.print_help()
parser.error(helps.format('bueno'))
setattr(namespace, self.dest, values)
def parse(self) -> argparse.Namespace:
'''
Parses and returns an argparse.Namespace.
'''
self._addargs()
return self.argp.parse_args()
class Bueno:
'''
Implements the bueno service dispatch system.
'''
def __init__(self, pargs: argparse.Namespace) -> None:
service.Factory.build(pargs.command).start()
@staticmethod
def main(pargs: argparse.Namespace) -> None:
'''
Instantiates and runs a bueno service.
'''
Bueno(pargs)
def main() -> None:
'''
bueno's main().
'''
if utils.privileged_user():
ers = '\nRunning this program as root is a bad idea... Exiting now.\n'
sys.exit(ers)
Bueno.main(ArgumentParser().parse())
# vim: ft=python ts=4 sts=4 sw=4 expandtab
| UTF-8 | Python | false | false | 2,942 | py | 89 | bueno.py | 53 | 0.584296 | 0.580218 | 0 | 106 | 26.754717 | 79 |
karthikpappu/pyc_source | 9,113,920,607,420 | bace68226de851f0042838081280005b0a2ad6ed | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/xsrfprobe-2.3.1-py3.6/discovered.cpython-36.py | 7cf8b94f0a6b3757a26b676179fd050eca7733d5 | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.linux-x86_64/egg/xsrfprobe/files/discovered.py
# Compiled at: 2020-01-29 10:31:04
# Size of source mod 2**32: 886 bytes
VULN_LIST = []
STRENGTH_LIST = []
REQUEST_TOKENS = []
INTERNAL_URLS = []
FILES_EXEC = []
FORMS_TESTED = []
SCAN_ERRORS = [] | UTF-8 | Python | false | false | 413 | py | 114,545 | discovered.cpython-36.py | 111,506 | 0.680387 | 0.554479 | 0 | 14 | 28.571429 | 80 |
renzoxpixely/PortafolioDjango | 5,755,256,220,869 | 14bdae73a4fb09f615fd57ace2192259324928d7 | f2618a094e14297c67504c1a1ba7f6a520b33132 | /lib/python3.7/tokenize.py | f5286a93e5ac7a216c9c45f3611d6e585a97ef14 | []
| no_license | https://github.com/renzoxpixely/PortafolioDjango | 225ebb1b572470d6f9cadb8ecd555c9b453512ee | 488d3f026a1efa80c9bd965ed52d82f443a428fc | refs/heads/master | 2022-12-16T19:26:09.765250 | 2019-01-07T01:34:53 | 2019-01-07T01:34:53 | 162,932,313 | 0 | 0 | null | false | 2022-12-08T01:20:34 | 2018-12-24T00:00:16 | 2019-01-07T01:34:55 | 2022-12-08T01:20:34 | 10,349 | 0 | 0 | 7 | Python | false | false | /home/user/miniconda3/lib/python3.7/tokenize.py | UTF-8 | Python | false | false | 47 | py | 48 | tokenize.py | 42 | 0.829787 | 0.765957 | 0 | 1 | 47 | 47 |
bernadinm/sha512-crypt-validate | 14,817,637,216,861 | 40b7227455be12123b64b423c1b81f5eb5a3ce86 | 1d215d5726ef158acea0f75813ed79d52d96402c | /script.py | b149fff2ffc57055be23e370c1cf1823eb99abbc | [
"Apache-2.0"
]
| permissive | https://github.com/bernadinm/sha512-crypt-validate | 3661c99b33623c1179dd5413ad4435f4aed2085b | c17c4facb152b9a7aebeee916ebedf7a993d87b5 | refs/heads/master | 2020-05-16T12:59:04.533022 | 2019-05-22T17:22:40 | 2019-05-22T17:22:40 | 183,061,333 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from passlib.hash import sha512_crypt
import sys
print (sha512_crypt.verify(sys.argv[1], sys.argv[2]))
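# Hedged usage sketch (argument order taken from sys.argv above; the hash is
# illustrative):
#
#   python script.py 'hunter2' '$6$rounds=656000$saltsalt$...'
#   # prints True when the plaintext matches the sha512-crypt hash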
| UTF-8 | Python | false | false | 104 | py | 2 | script.py | 1 | 0.759615 | 0.682692 | 0 | 4 | 25 | 53 |
KnowledgeLinks/rdfframework | 8,693,013,821,284 | 632068b34a475528f8e669105d1dbe3ac3d39fda | 7b6235e09a4a5674aa34326e252741a16bbd1bd5 | /rdfframework/configuration/__init__.py | 765596356121ff1de59e4dd8c73b5b4fe19d1b1d | [
"MIT"
]
| permissive | https://github.com/KnowledgeLinks/rdfframework | 68a752b3a3f6a9fdbe73281c7094fe929dfa481d | c6b6408b6e90dd166b4981aeaf3a768e46c22ce0 | refs/heads/master | 2020-03-26T11:44:29.229491 | 2019-09-18T07:01:17 | 2019-09-18T07:01:17 | 54,396,138 | 11 | 0 | MIT | false | 2019-09-18T07:01:18 | 2016-03-21T14:34:49 | 2019-02-19T01:34:29 | 2019-09-18T07:01:18 | 68,696 | 4 | 0 | 12 | Python | false | false | """
Configuration Manager
=====================
The configuartion manager is a global manager for the package
:copyright: Copyright (c) 2016 by Michael Stabile and Jeremy Nelson.
:license: To be determined, see LICENSE.txt for details.
"""
from .rdfwconfig import RdfConfigManager
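# Re-exported at package level so callers can write:
#     from rdfframework.configuration import RdfConfigManager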
__author__ = "Mike Stabile, Jeremy Nelson"
| UTF-8 | Python | false | false | 328 | py | 153 | __init__.py | 93 | 0.716463 | 0.704268 | 0 | 13 | 24.230769 | 68 |
Azure/azure-sdk-for-python | 12,034,498,399,631 | 2ea415609b57d6e5033257ca7c00937d3267a72e | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/storage/azure-storage-queue/samples/queue_samples_authentication.py | 76cc089c1e20c66111239cfda9d091f6b72e4041 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | https://github.com/Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | false | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | 2023-09-14T16:31:22 | 2023-09-14T21:48:48 | 614,565 | 3,815 | 2,501 | 1,027 | Python | false | false | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: queue_samples_authentication.py
DESCRIPTION:
These samples demonstrate authenticating a client via a connection string,
shared access key, token credential from Azure Active Directory, or by
generating a sas token with which the returned signature can be used with
the credential parameter of any QueueServiceClient or QueueClient.
USAGE:
python queue_samples_authentication.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
2) AZURE_STORAGE_ACCOUNT_URL - the queue service account URL
3) AZURE_STORAGE_ACCOUNT_NAME - the name of the storage account
4) AZURE_STORAGE_ACCESS_KEY - the storage account access key
5) ACTIVE_DIRECTORY_APPLICATION_ID - Azure Active Directory application ID
6) ACTIVE_DIRECTORY_APPLICATION_SECRET - Azure Active Directory application secret
7) ACTIVE_DIRECTORY_TENANT_ID - Azure Active Directory tenant ID
"""
from datetime import datetime, timedelta
import os
class QueueAuthSamples(object):
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
account_url = os.getenv("AZURE_STORAGE_ACCOUNT_URL")
account_name = os.getenv("AZURE_STORAGE_ACCOUNT_NAME")
access_key = os.getenv("AZURE_STORAGE_ACCESS_KEY")
active_directory_application_id = os.getenv("ACTIVE_DIRECTORY_APPLICATION_ID")
active_directory_application_secret = os.getenv("ACTIVE_DIRECTORY_APPLICATION_SECRET")
active_directory_tenant_id = os.getenv("ACTIVE_DIRECTORY_TENANT_ID")
def authentication_by_connection_string(self):
# Instantiate a QueueServiceClient using a connection string
# [START auth_from_connection_string]
from azure.storage.queue import QueueServiceClient
queue_service = QueueServiceClient.from_connection_string(conn_str=self.connection_string)
# [END auth_from_connection_string]
# Get information for the Queue Service
properties = queue_service.get_service_properties()
def authentication_by_shared_key(self):
# Instantiate a QueueServiceClient using a shared access key
# [START create_queue_service_client]
from azure.storage.queue import QueueServiceClient
queue_service = QueueServiceClient(account_url=self.account_url, credential=self.access_key)
# [END create_queue_service_client]
# Get information for the Queue Service
properties = queue_service.get_service_properties()
def authentication_by_active_directory(self):
# [START create_queue_service_client_token]
# Get a token credential for authentication
from azure.identity import ClientSecretCredential
token_credential = ClientSecretCredential(
self.active_directory_tenant_id,
self.active_directory_application_id,
self.active_directory_application_secret
)
# Instantiate a QueueServiceClient using a token credential
from azure.storage.queue import QueueServiceClient
queue_service = QueueServiceClient(account_url=self.account_url, credential=token_credential)
# [END create_queue_service_client_token]
# Get information for the Queue Service
properties = queue_service.get_service_properties()
def authentication_by_shared_access_signature(self):
# Instantiate a QueueServiceClient using a connection string
from azure.storage.queue import QueueServiceClient
queue_service = QueueServiceClient.from_connection_string(conn_str=self.connection_string)
# Create a SAS token to use for authentication of a client
from azure.storage.queue import generate_account_sas, ResourceTypes, AccountSasPermissions
sas_token = generate_account_sas(
self.account_name,
self.access_key,
resource_types=ResourceTypes(service=True),
permission=AccountSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
token_auth_queue_service = QueueServiceClient(account_url=self.account_url, credential=sas_token)
# Get information for the Queue Service
properties = token_auth_queue_service.get_service_properties()
if __name__ == '__main__':
sample = QueueAuthSamples()
sample.authentication_by_connection_string()
sample.authentication_by_shared_key()
sample.authentication_by_active_directory()
sample.authentication_by_shared_access_signature()
| UTF-8 | Python | false | false | 4,902 | py | 7,477 | queue_samples_authentication.py | 5,645 | 0.703386 | 0.70155 | 0 | 113 | 42.380531 | 105 |