| Column | Type | Values / lengths |
|---|---|---|
| repo_name | string | lengths 7-111 |
| __id__ | int64 | 16.6k-19,705B |
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 5-151 |
| content_id | string | lengths 40-40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k-687M ⌀ |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0-10.2M ⌀ |
| gha_stargazers_count | int32 | 0-178k ⌀ |
| gha_forks_count | int32 | 0-88.9k ⌀ |
| gha_open_issues_count | int32 | 0-2.72k ⌀ |
| gha_language | string | lengths 1-16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10-2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10-2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1-202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1-202k |
| alphanum_fraction | float64 | 0.26-0.89 |
| alpha_fraction | float64 | 0.2-0.89 |
| hex_fraction | float64 | 0-0.09 |
| num_lines | int32 | 1-93.6k |
| avg_line_length | float64 | 4.57-103 |
| max_line_length | int64 | 7-931 |
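Each record that follows pairs this repository metadata with the raw file contents in the content column. As a minimal sketch of how records with this schema could be filtered, assuming they have been exported to a parquet file (the file name and the thresholds below are illustrative assumptions, not part of this dump):

```python
import pandas as pd

# Assumption: the records shown below have been exported to "code_files.parquet"
# with the schema listed above; the file name and thresholds are illustrative only.
df = pd.read_parquet("code_files.parquet")

# Keep hand-written (non-vendored, non-generated) Python files of moderate size.
mask = (
    (~df["is_vendor"])
    & (~df["is_generated"])
    & (df["length_bytes"] < 100_000)
    & (df["alpha_fraction"] > 0.25)
    & (df["max_line_length"] < 500)
)
print(df.loc[mask, ["repo_name", "path", "num_lines"]].head())
```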
rupol/Computer-Architecture-Lecture | 10,909,216,951,463 | 387682528a4e773a65738037f24a7ef33f818694 | 3430ce172c5f4f143ddf8214e8553a382f25d461 | /01_basics/conversion.py | 6b33466468e8230dd13c71e9818bf08023591828 | []
| no_license | https://github.com/rupol/Computer-Architecture-Lecture | b596ed54d210082d46e2d062b92e50a4e410baed | 0cc5715dabf7aeec9b8cd6d35a0d3cb823b0db2c | refs/heads/master | 2022-12-17T07:51:15.578565 | 2020-09-14T23:34:20 | 2020-09-14T23:34:20 | 292,130,158 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # num = 21
# print(int(num)) # 21
# print(bin(21)) # 0b10101
# print(hex(21)) # 0x15
# print(hex(129038)) # 0x1f80e
# print(int(0x1005d41c0)) # 4301078976
# print(2 ** 8) # 256 - possibilities in an 8 bit computer
# a = 2 ** 32
# b = 2 ** 64
# print(a)
# print(hex(a))
# print(int(0b11001010))
# print(bin(115))
# print(bin(54))
# print(hex(54))
| UTF-8 | Python | false | false | 354 | py | 14 | conversion.py | 7 | 0.59322 | 0.387006 | 0 | 17 | 19.823529 | 59 |
mohitsh/python_work | 16,286,515,989,367 | 3c00880d5ab65703b12054067daeaa35a84e254d | c73fc798764f40ea6fa466a573fb01223e367ce3 | /graph/wordLadder.py | 56a223df4843d6e47b01d453e1ed9c5fd34f6e96 | []
| no_license | https://github.com/mohitsh/python_work | b1385f62104aa6b932f5452ca5c2421526345455 | 223a802dea5cdb73f44a159856c7432983655668 | refs/heads/master | 2020-04-24T00:34:15.427060 | 2018-08-21T19:12:07 | 2018-08-21T19:12:07 | 37,491,449 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from graph2 import Graph, Vertex
def buildGraph(wordFile):
d = {}
g = Graph()
wfile = open(wordFile,'r')
for line in wfile:
word = line[:-1]
#print word
for i in range(len(word)):
bucket = word[:i] + '_' + word[i+1:]
if bucket in d:
d[bucket].append(word)
else:
d[bucket] = [word]
for key in d:
print key, d[key]
print d.keys()
print d.values()
for bucket in d.keys():
for word1 in d[bucket]:
for word2 in d[bucket]:
if word1 != word2:
g.addEdge(word1,word2)
return g
buildGraph('words.txt')
| UTF-8 | Python | false | false | 591 | py | 284 | wordLadder.py | 276 | 0.560068 | 0.544839 | 0 | 32 | 17.4375 | 49 |
jmp85/enigma-python | 14,800,457,352,496 | dbabd5c7e935905bf24d9cc96a80f877c198bcb3 | 443c12b5c2ab634fe974942b35237ab7af31d1c0 | /enigma/__init__.py | 877c0bc34fc3b0a0ba537424c7b34b683c5633be | []
| no_license | https://github.com/jmp85/enigma-python | f1481797a270533f9aefead379ae871d81c46a31 | 22b502a69634f907d664540f945b9da3189af2a8 | refs/heads/master | 2021-01-10T09:12:29.979297 | 2016-02-27T14:23:50 | 2016-02-27T14:23:50 | 52,628,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
__init__.py
The enigma package
'''
| UTF-8 | Python | false | false | 49 | py | 6 | __init__.py | 5 | 0.44898 | 0.44898 | 0 | 6 | 7.166667 | 22 |
aky4wn/pybats | 8,031,588,848,770 | 10f2bcc369c1fde3845f3e52a6021eb2c900ac4e | ac2d144bb8eb06e664eb2f8e16ae3185d0473d1e | /pybats/forecast.py | b2c48e15afa25239739533ff0bf54bc990bf7316 | [
"MIT"
]
| permissive | https://github.com/aky4wn/pybats | f3d64fb48169be4b1767869f512dd38b6abb81f9 | ac47d43b8491e70537cea41a15653c01f8bae652 | refs/heads/master | 2022-04-18T23:29:52.423164 | 2020-04-06T19:30:31 | 2020-04-06T19:30:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## The standard DGLM forecast functions
import numpy as np
from scipy import stats
from scipy.special import gamma
from pybats.update import update_F
def forecast_aR(mod, k):
Gk = np.linalg.matrix_power(mod.G, k - 1)
a = Gk @ mod.a
R = Gk @ mod.R @ Gk.T
if mod.discount_forecast:
R += (k - 1) * mod.W
return a, R
def forecast_R_cov(mod, k1, k2):
"""
:param mod: model
:param k1: 1st Forecast Horizon (smaller)
:param k2: 2nd Forecast Horizon (larger)
    :return: State vector covariance across k1, k2. In West & Harrison this covariance is defined as Ct(k,j), pg. 106
"""
if k2 < k1:
tmp = k1
k1 = k2
k2 = tmp
Gk = np.linalg.matrix_power(mod.G, k2 - k1)
a, Rk1 = forecast_aR(mod, k1)
return Gk @ Rk1
def forecast_marginal(mod, k, X = None, nsamps = 1, mean_only = False, state_mean_var = False):
"""
Forecast function k steps ahead (marginal)
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
if state_mean_var:
return ft, qt
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
if mean_only:
return mod.get_mean(param1, param2)
# Simulate from the forecast distribution
return mod.simulate(param1, param2, nsamps)
def forecast_path(mod, k, X = None, nsamps = 1):
"""
Forecast function for the k-step path
k: steps ahead to forecast
X: array with k rows for the future regression components
nsamps: Number of samples to draw from forecast distribution
"""
samps = np.zeros([nsamps, k])
F = np.copy(mod.F)
for n in range(nsamps):
param1 = mod.param1
param2 = mod.param2
a = np.copy(mod.a)
R = np.copy(mod.R)
for i in range(k):
# Plug in the correct F values
if mod.nregn > 0:
F = update_F(mod, X[i,:], F=F)
# if mod.nregn > 0:
# F[mod.iregn] = X[i,:].reshape(mod.nregn,1)
# Get mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, param1, param2)
# Simulate next observation
samps[n, i] = mod.simulate(param1, param2, nsamps = 1)
# Update based on that observation
param1, param2, ft_star, qt_star = mod.update_conjugate_params(samps[n, i], param1, param2)
# Kalman filter update on the state vector (using Linear Bayes approximation)
m = a + R @ F * (ft_star - ft)/qt
C = R - R @ F @ F.T @ R * (1 - qt_star/qt)/qt
# Get priors a, R for the next time step
a = mod.G @ m
R = mod.G @ C @ mod.G.T
R = (R + R.T)/2
# Discount information
if mod.discount_forecast:
R = R + mod.W
return samps
def forecast_marginal_bindglm(mod, n, k, X=None, nsamps=1, mean_only=False):
"""
Forecast function k steps ahead (marginal)
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# F = np.copy(mod.F)
# if mod.nregn > 0:
# F[mod.iregn] = X.reshape(mod.nregn,1)
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
if mean_only:
return mod.get_mean(n, param1, param2)
# Simulate from the forecast distribution
return mod.simulate(n, param1, param2, nsamps)
def forecast_path_dlm(mod, k, X=None, nsamps=1, approx=True):
"""
Forecast function for the k-step path
k: steps ahead to forecast
X: array with k rows for the future regression components
nsamps: Number of samples to draw from forecast distribution
"""
if approx:
mean = np.zeros([k])
cov = np.zeros([k, k])
F = np.copy(mod.F)
Flist = [None for x in range(k)]
Rlist = [None for x in range(k)]
for i in range(k):
# Evolve to the prior at time t + i + 1
a, R = forecast_aR(mod, i + 1)
Rlist[i] = R
# Plug in the correct F values
if mod.nregn > 0:
F = update_F(mod, X[i, :], F=F)
Flist[i] = np.copy(F)
# Find lambda mean and var
ft, qt = mod.get_mean_and_var(F, a, R)
mean[i] = ft
cov[i, i] = qt
# Find covariances with previous lambda values
for j in range(i):
# Covariance matrix between the state vector at times j, i, i > j
cov_ij = np.linalg.matrix_power(mod.G, i - j) @ Rlist[j]
# Covariance between lambda at times j, i
cov[j, i] = cov[i, j] = Flist[j].T @ cov_ij @ Flist[i]
return multivariate_t(mean, cov, mod.n, nsamps)
else:
samps = np.zeros([nsamps, k])
F = np.copy(mod.F)
p = len(F)
## Initialize samples of the state vector and variance from the prior
v = 1.0 / np.random.gamma(shape=mod.n / 2, scale=2 / (mod.n * mod.s[0]), size=nsamps)
thetas = np.array(list(
map(lambda var: np.random.multivariate_normal(mean=mod.a.reshape(-1), cov=var / mod.s * mod.R, size=1).T,
v))).squeeze()
for i in range(k):
# Plug in the correct F values
if mod.nregn > 0:
F = update_F(mod, X[i, :], F=F)
# mean
ft = (thetas @ F).reshape(-1)
# Simulate from the sampling model
samps[:, i] = mod.simulate_from_sampling_model(ft, v, nsamps)
# Evolve the state vector and variance for the next timestep
if mod.discount_forecast:
v = v * np.random.beta(mod.delVar * mod.n / 2, ((1 - mod.delVar) * mod.n) / 2, size=nsamps)
thetas = np.array(list(
map(lambda theta, var: mod.G @ theta + np.random.multivariate_normal(mean=np.zeros(p),
cov=var / mod.s * mod.W,
size=1),
thetas, v))).squeeze()
else:
v = v
thetas = (mod.G @ thetas.T).T
return samps
def multivariate_t(mean, scale, nu, nsamps):
'''
mean = mean
scale = covariance matrix * ((nu-2)/nu)
nu = degrees of freedom
nsamps = # of samples to produce
'''
p = len(mean)
g = np.tile(np.random.gamma(nu/2.,2./nu, nsamps), (p, 1)).T
Z = np.random.multivariate_normal(np.zeros(p), scale, nsamps)
return mean + Z/np.sqrt(g)
def multivariate_t_density(y, mean, scale, nu):
'''
y = vector of observations
mean = mean
scale = covariance matrix * ((nu-2)/nu)
nu = degrees of freedom
'''
y = y.reshape(-1, 1)
mean = mean.reshape(-1, 1)
dim = len(y)
if dim > 1:
constant = gamma((nu + dim) / 2) / (gamma(nu / 2) * np.sqrt((np.pi * nu) ** dim * np.linalg.det(scale)))
dens = (1. + ((y - mean).T @ np.linalg.inv(scale) @ (y - mean)) / nu) ** (-(nu + dim) / 2)
else:
constant = gamma((nu + dim) / 2) / (gamma(nu / 2) * np.sqrt((np.pi * nu) ** dim * scale))
dens = (1. + ((y - mean))**2 / (nu * scale)) ** (-(nu + dim) / 2)
return 1. * constant * dens
def forecast_state_mean_and_var(mod, k = 1, X = None):
"""
Forecast function that returns the mean and variance of lambda = state vector * predictors
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# F = np.copy(mod.F)
# if mod.nregn > 0:
# F[mod.iregn] = X.reshape(mod.nregn, 1)
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
return ft, qt
def forecast_marginal_density_MC(mod, k, X = None, nsamps = 1, y = None):
"""
Returns the log forecast density of an observation y
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# F = np.copy(mod.F)
# if mod.nregn > 0:
# F[mod.iregn] = X.reshape(mod.nregn, 1)
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
# Simulate from the conjugate prior
prior_samps = mod.simulate_from_prior(param1, param2, nsamps)
# Get the densities
densities = mod.sampling_density(y, prior_samps)
# Take a Monte Carlo average, and return the mean density, on the log scale
return np.log(np.mean(densities))
| UTF-8 | Python | false | false | 9,356 | py | 30 | forecast.py | 11 | 0.539119 | 0.526935 | 0 | 301 | 30.079734 | 117 |
Mounika1577/mounika_python | 18,021,682,808,230 | 43b769e23cd907826d05442d62385904519ce7ef | 1c2cf4f97996a9a5858c0b1114e1967cc9340f27 | /dijkstra.py | 8035e627fbbb247da517fa259c07884216ffa6fb | []
| no_license | https://github.com/Mounika1577/mounika_python | 59fdb5b6c4c3b3306e53f60fcece5c7aec9c206a | 59de3e02f949af901b933ae32634b87db1cf6de9 | refs/heads/master | 2023-02-25T11:19:50.813030 | 2021-01-31T13:36:15 | 2021-01-31T13:36:15 | 281,108,492 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Dijkstra's Algorithm
import sys

graph=[[0,7,9,0,0,14],[7,0,10,15,0,0],[9,10,0,11,0,2],[0,15,11,0,6,0],[0,0,0,6,0,9],[14,0,2,0,9,0]]
V=len(graph)

def minDistance(dist,visited):
    # pick the unvisited vertex with the smallest tentative distance
    min_dist=sys.maxsize
    min_index=-1
    for v in range(len(dist)):
        if(dist[v]<min_dist and visited[v]==False):
            min_dist=dist[v]
            min_index=v
    return min_index

src=int(input())
d=[sys.maxsize]*V
d[src]=0  # distance to the source vertex is zero
visited=[False]*V
for i in range(V):
    u=minDistance(d,visited) #current vertex
    visited[u]=True
    for j in range(V):
        if(visited[j]==False and d[u]+graph[u][j]<d[j] and graph[u][j]>0):
            d[j]=d[u]+graph[u][j]
print(d) | UTF-8 | Python | false | false | 627 | py | 30 | dijkstra.py | 30 | 0.559809 | 0.486443 | 0 | 25 | 24.12 | 99 |
EdHub563/gpa | 11,373,073,410,723 | 77f087f854cd3c4e3252aa9e63da2ecbbc493374 | 0c99e679fb9b3183cb3c6dffe997b3013b3572aa | /web/gpa_calc/gpa/models.py | 325b31c77a31e1f3bcd6e4f4015b344c2780101d | []
| no_license | https://github.com/EdHub563/gpa | b4e44bc945cf4f575bdca2404419f0682b1678e7 | 60421536697f33bbcf96f65c78a4683c02b447e8 | refs/heads/main | 2023-08-21T15:42:58.673001 | 2021-10-30T02:36:29 | 2021-10-30T02:36:29 | 303,064,571 | 0 | 9 | null | false | 2021-10-30T02:36:30 | 2020-10-11T07:18:45 | 2021-10-30T02:31:41 | 2021-10-30T02:36:29 | 474 | 0 | 9 | 0 | Dart | false | false | from django.db import models
# Create your models here.
# Pass the current semester's subject-wise grades to GPA_CALC as a dictionary whose keys are
# subject(str)+credits(str) (the last character of each key is the credit count) and whose values
# are grade strings, plus a second dictionary mapping semester_count to current_cgpa.
# An illustrative usage sketch follows the class definition below.
class GPA_CALC:
def __init__(self, di, cur_cgpa):
self.__score= di.copy()
self.__sgpa=0
self.__cgpa= cur_cgpa
def grade_to_score(self, val):
if val== 'EX':
return 10
elif val== 'A':
return 9
elif val== 'B':
return 8
elif val== 'C':
return 7
elif val== 'D':
return 6
elif val== 'P':
return 5
elif val== 'F':
return 0
def sgpa_calc(self):
total_credits=0
for k, v in self.__score.items(): #last character of key is credit of that subject
self.__sgpa += self.grade_to_score(v) * int(k[-1])
total_credits += int(k[-1])
self.__sgpa /= total_credits
return self.__sgpa
def cgpa_calc(self):
n=1
val=0
for k, v in self.__cgpa.items():
n+=k
val += (self.__sgpa + self.__cgpa[k])/2
self.__cgpa.pop(n-1)
self.__cgpa[n]=val
return self.__cgpa
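# Illustrative usage (a minimal sketch; the subjects, credits and grades below are made-up
# values, not part of the original file):
#   sem_grades = {'Maths4': 'A', 'Physics3': 'B', 'Chemistry3': 'EX'}  # key ends with credits
#   cgpa_so_far = {2: 8.1}                                             # semesters done -> CGPA
#   calc = GPA_CALC(sem_grades, cgpa_so_far)
#   print(calc.sgpa_calc())   # GPA for this semester
#   print(calc.cgpa_calc())   # updated {semester_count: cgpa}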
| UTF-8 | Python | false | false | 1,390 | py | 18 | models.py | 2 | 0.501439 | 0.489928 | 0 | 46 | 29.217391 | 120 |
coxmediagroup/notario | 9,165,460,226,609 | c403ab376f25df5fe99e99d3d1190f8a21927c7f | 60e8a0eef22782f92b43d3af7e54b5b6551f850d | /notario/validators/recursive.py | 18d06cae7d1a943c6c879f4c0a3bda7c4dd9101c | []
| no_license | https://github.com/coxmediagroup/notario | 3c951db4d8cf7d397a84de16269bc1b6dc4aaa9a | e160b97908c8b90889d84d4ce7f7eb7831348cf7 | refs/heads/master | 2018-05-30T14:55:10.070147 | 2012-07-10T20:24:07 | 2012-07-10T20:24:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from notario.exceptions import Invalid
from notario.engine import RecursiveValidator
class BasicRecursiveValidator(object):
"""
Base class for recursive validators, can be sub-classed
for other type of recursive validators but should not be
used directly.
"""
__validator_leaf__ = True
def __init__(self, schema):
self.schema = schema
class AnyObject(BasicRecursiveValidator):
"""
Go over all the items in an dict object and make sure that at least
one of the items validates correctly against the schema provided.
If no items pass it raises ``Invalid``.
.. testsetup:: anyobject
from notario import validate
from notario.validators.recursive import AnyObject
Example usage for single values::
data = {'foo': {{'a':10}, {'b':20}, {'c':40}}}
schema = ('foo', AnyObject(('a', 10)))
validate(data, schema)
Example usage for other data structures::
data = {'foo': [{'b': 10}, {'a': 1}]}
schema = ('foo', AnyObject(('a', 1))
validate(data, schema)
When a single item in the array fails to pass against the validator's schema it
stops further iteration and it will raise an error like:
.. doctest:: anyobject
>>> data = {'foo': {'a':{'a':10}, 'b':{'a':20}, 'c':{'a':20}}}
>>> schema = ('foo', AnyObject(('a', ('a', 90))))
>>> validate(data, schema)
Traceback (most recent call last):
...
Invalid: foo -> a did not contain any valid objects against callable: AnyObject
In this particular validator, it remembers on what key of the dict object
the failure was created and it goes even further giving the key and value
of the object it went against.
"""
def __call__(self, data, tree):
index = len(data) - 1
validator = RecursiveValidator(data, self.schema, [], index=index)
for item_index in range(len(data)):
try:
return validator.leaf(item_index)
except Invalid:
if tree:
tree.pop
pass
msg = "did not contain any valid objects against callable: %s" % self.__class__.__name__
raise Invalid(self.schema, tree, pair='value', msg=msg)
class AllObjects(BasicRecursiveValidator):
"""
For all the objects contained in a dictionary apply the schema passed in
to the validator.
If a single item object fails, it raises ``Invalid``.
.. testsetup:: allobjects
from notario import validate
from notario.validators.recursive import AllObjects
Example usage for single values::
data = {'foo': {{'a':10}, {'a':20}, {'a':20}}}
schema = ('foo', AllObjects(('a', 20)))
validate(data, schema)
Example usage for other data structures::
data = {'foo': [{'a': 1}, {'a': 1}]}
schema = ('foo', AllObjects(('a', 1))
validate(data, schema)
When a single item in the array fails to pass against the validator's schema it
stops further iteration and it will raise an error like:
.. doctest:: allobjects
>>> data = {'foo': {'a':{'a':10}, 'b':{'a':20}, 'c':{'a':20}}}
>>> schema = ('foo', AllObjects(('a', ('a', 90))))
>>> validate(data, schema)
Traceback (most recent call last):
...
Invalid: -> foo -> a -> a -> 10 did not match 90
In this particular validator, it remembers on what key of the dict object
the failure was created and it goes even further giving the key and value
of the object it went against.
"""
def __call__(self, data, tree):
validator = RecursiveValidator(data, self.schema, tree)
validator.validate()
| UTF-8 | Python | false | false | 3,758 | py | 10 | recursive.py | 10 | 0.600053 | 0.588345 | 0 | 122 | 29.803279 | 96 |
AstrorEnales/CodeEval | 5,265,629,944,064 | 21309ead87e36f3a4eb2eac8ce96d0524ea29481 | 92153c926f7dbb766304a28abe6b055e3ba03607 | /Easy/Knight Moves/main.py | acc71fb714502a030ef5e3bc90fa16cba4e37869 | [
"MIT"
]
| permissive | https://github.com/AstrorEnales/CodeEval | c25cac01dcd7cde486db4759cb8ee1b719bb5948 | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | refs/heads/master | 2016-09-01T10:18:34.522449 | 2015-11-12T17:15:05 | 2015-11-12T17:15:05 | 45,317,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
letters = 'abcdefgh'
lines = open(sys.argv[1], 'r')
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if len(line) > 0:
fields = []
letter = letters.index(line[0])
number = int(line[1])
if number >= 3:
if letter >= 1:
fields.append(letters[letter - 1] + str(number - 2))
if letter <= 6:
fields.append(letters[letter + 1] + str(number - 2))
if number <= 6:
if letter >= 1:
fields.append(letters[letter - 1] + str(number + 2))
if letter <= 6:
fields.append(letters[letter + 1] + str(number + 2))
if letter >= 2:
if number >= 2:
fields.append(letters[letter - 2] + str(number - 1))
if number <= 7:
fields.append(letters[letter - 2] + str(number + 1))
if letter <= 5:
if number >= 2:
fields.append(letters[letter + 2] + str(number - 1))
if number <= 7:
fields.append(letters[letter + 2] + str(number + 1))
print(' '.join(sorted(fields)))
lines.close()
| UTF-8 | Python | false | false | 1,237 | py | 95 | main.py | 95 | 0.452708 | 0.426839 | 0 | 36 | 32.361111 | 68 |
kingtub/OpencvExercise | 2,413,771,644,859 | 3455710a003f73e740a61b8938aa767ec79ac640 | da4a74569af5a95a8f79e250ccb664d00ac61a31 | /opencv/pyramid/pyr_up.py | 47d2d7fc109c99b116ae7320327cc6d1cd600172 | []
| no_license | https://github.com/kingtub/OpencvExercise | 7bea3a24334e78a189775e53cd89a7fcdafd4772 | b9765a67855a1abded1d659e9a168b97e0db79ae | refs/heads/master | 2022-11-30T08:14:07.823478 | 2020-08-13T02:38:00 | 2020-08-13T02:38:00 | 271,786,271 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Image pyramid. This is (Gaussian) upsampling.
'''
import cv2
o = cv2.imread('C:\\D\\testImgs\\chapter12\\image' + '\\p.bmp')
r1 = cv2.pyrUp(o)
r2 = cv2.pyrUp(r1)
r3 = cv2.pyrUp(r2)
cv2.imshow('o', o)
cv2.imshow('r1', r1)
cv2.imshow('r2', r2)
cv2.imshow('r3', r3)
cv2.waitKey()
cv2.destroyAllWindows() | UTF-8 | Python | false | false | 319 | py | 48 | pyr_up.py | 48 | 0.629893 | 0.544484 | 0 | 17 | 15.588235 | 63 |
artemponomarevjetski/python-codility-solutions-artemp | 9,165,460,219,411 | 69f18b17203e3f627e2c0d5c52cfe8aac6b60959 | e717b0acc0eb485e7d804ba5b6fdcc2636f181ad | /equileader-correct-fast.py | f882c8f68ab10939f07d2a043077ba282f832541 | []
| no_license | https://github.com/artemponomarevjetski/python-codility-solutions-artemp | a3aa78ca5e87e812359c7094eea7e9cd438bd473 | aa6fec4b9efbb21aa48e40e6862f88498b22b245 | refs/heads/main | 2023-02-25T02:37:26.815174 | 2022-10-14T04:12:30 | 2022-10-14T04:12:30 | 240,374,263 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 11:35:06 2020
@author: artemponomarev
"""
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
"""
Task description is given on Codility website, https://app.codility.com/programmers/
solution for equileader
"""
N = len(A)
if not (N >= 1 and N <= 1e5):
return 0
if not (min(A) >= -1e9 and max(A) <= 1e9):
return 0
if N == 1:
return 0
if N == 2:
if A[0] == A[1]:
return 1
else:
return 0
A_len = N
candidate = None
candidate_count = 0
# Find out a leader candidate
for index in range(A_len):
if candidate_count == 0:
candidate = A[index]
candidate_count += 1
else:
if A[index] == candidate:
candidate_count += 1
else:
candidate_count -= 1
# Make sure the candidate is the leader
leader_count = len([number for number in A if number == candidate])
if leader_count <= A_len//2:
# The candidate is not the leader
return 0 # return 0, when there is no leaders in A
else:
leader = candidate
equi_leaders = 0
leader_count_so_far = 0
for index in range(A_len):
if A[index] == leader:
leader_count_so_far += 1
if leader_count_so_far > (index+1)//2 \
and leader_count-leader_count_so_far > (A_len-index-1)//2:
# Both the head and tail have leaders of the same value
# as "leader"
equi_leaders += 1
return equi_leaders
print(solution([1, 1, 5, 5, 5, 5]))
#
#Analysis
#Detected time complexity:
#O(N)
#expand allExample tests
#▶ example
#example test ✔OK
#expand allCorrectness tests
#▶ single
#single element ✔OK
#▶ double
#two elements ✔OK
#▶ simple
#simple test ✔OK
#▶ small_random
#small random test with two values, length = ~100 ✔OK
#▶ small
#random + 200 * [MIN_INT] + random ,length = ~300 ✔OK
#expand allPerformance tests
#▶ large_random
#large random test with two values, length = ~50,000 ✔OK
#▶ large
#random(0,1) + 50000 * [0] + random(0, 1), length = ~100,000 ✔OK
#▶ large_range
#1, 2, ..., N, length = ~100,000 ✔OK
#▶ extreme_large
#all the same values
| UTF-8 | Python | false | false | 2,405 | py | 35 | equileader-correct-fast.py | 33 | 0.580059 | 0.541614 | 0 | 90 | 25.3 | 88 |
John-Boccio/FacialExpressionRecognition | 2,886,218,040,733 | 0d816be4cbe0fd75bdfcf5051b60b939692634dc | 3bda096ad622a39b2b05bfacc0a7bb8c3b724eb5 | /data_loader/__init__.py | 954a393d57e93af651c32542f7a0bf11595c6831 | []
| no_license | https://github.com/John-Boccio/FacialExpressionRecognition | 17d2fc26e7d8b726c61fd4f778ef1e45067c5fa5 | 523e37651aed11f13a59bc09d4d1a7a2a10854e9 | refs/heads/master | 2022-10-05T16:25:00.768446 | 2020-05-05T18:56:40 | 2020-05-05T18:56:40 | 206,116,310 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .CK import CKDataset as CKDataset
from .FER2013 import FER2013Dataset as FER2013Dataset
from .ExpW import ExpWDataset as ExpWDataset
| UTF-8 | Python | false | false | 138 | py | 20 | __init__.py | 17 | 0.847826 | 0.76087 | 0 | 3 | 45 | 53 |
JpBongiovanni/UserDashboard | 17,772,574,688,137 | 52fc4096476521e4773a42e59c695a98fc152903 | 2ed6c65b4b7c7052af630fe4fa187896b9d34573 | /test_app/views.py | 57d3c9fced9844032f5f5d13a9cd2dd326a8d884 | []
| no_license | https://github.com/JpBongiovanni/UserDashboard | 26d1293b4a733f752c3d4bc543bea93a4a92df27 | 3df5fe21f12f6e689cd9c0c462cd312c05970904 | refs/heads/master | 2023-02-02T13:56:38.861766 | 2020-12-19T13:16:13 | 2020-12-19T13:16:13 | 321,970,534 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, HttpResponse, redirect
from registration_app.models import User
def admin_dashboard(request):
context = {
"user": User.objects.get(id = request.session['user_id']),
"users": User.objects.all()
}
return render(request, "test_index.html", context)
| UTF-8 | Python | false | false | 314 | py | 3 | views.py | 3 | 0.684713 | 0.684713 | 0 | 9 | 33.666667 | 66 |
utshahansen2604/rpa_demo | 11,424,613,046,091 | 71de63c41621a3c4eabf97f46dd7d6118cab58a0 | 519aa28b6df2f1778fed566e027c4bb06b5406d5 | /rpa_demo_dd_loop.py | 9f4e0a7717e95a9493d150aba6a41406b385b7e7 | []
| no_license | https://github.com/utshahansen2604/rpa_demo | 10734640908dd008cbb8c4dc4b98f51c8d84229f | 75301a37d71f48a3ba3e41956891f3cd2329e2f9 | refs/heads/master | 2020-07-27T02:40:55.026635 | 2019-11-10T14:42:42 | 2019-11-10T14:42:42 | 208,840,551 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
# get the path of ChromeDriverServer
dir = os.path.dirname("C:\\Users\\Utshahan.Sen\\Desktop\\Codes")
chrome_driver_path = dir + "\\chromedriver.exe"
# create a new Chrome session
driver = webdriver.Chrome("C:\\Users\\Utshahan.Sen\\Desktop\\Codes\\chromedriver.exe")
driver.implicitly_wait(15)
driver.maximize_window()
for i in range(0,3):
#Navigate to the application home page
time.sleep(2)
url="https://shell.service-now.com/sp?id=sc_cat_item_guide&sys_id=54b94727dbf1df40bd27f9231d96199c" #Linux/Unix Services Pages -> Linux Server Decommission
driver.get(url)
action=ActionChains(driver)
def but_click(item_xpath): ##Button Click
try:
WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH, item_xpath)))
except (TimeoutException) as py_ex:
print(str(py_ex))
except (Exception):
but_click(item_xpath)
#print(str(py_ex) + " :Exception @ "+ str(item_xpath))
driver.find_element_by_xpath(item_xpath).click()
time.sleep(2)
def field_fill(item_xpath,text): ##Field Populate
try:
WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH, item_xpath)))
except (TimeoutException) as py_ex:
print(str(py_ex))
driver.find_element_by_xpath(item_xpath).send_keys(text)
#action.move_to_element(item_xpath)
time.sleep(1)
def field_fill_with_dropdown(item_xpath,text): ##Dropdown
try:
WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH, item_xpath)))
except (TimeoutException) as py_ex:
print(str(py_ex))
driver.find_element_by_xpath(item_xpath).send_keys(text)
driver.find_element_by_xpath("//*[@class='select2-result-label']").click()
time.sleep(2)
# def select_text(dropdpwn_link,item_link,text):
# Select(driver.find_element_by_xpath(item_link)).select_by_visible_text(text)
# driver.find_element_by_xpath(item_link).send_keys(Keys.ENTER)
# time.sleep(1)
but_click("//*[@id='sp_formfield_sh_linux_unix_services']/label[2]/input")
but_click("//*[@id='sp_formfield_sh_support_maintenance']/div[6]/label/input")
but_click("//*[@id='submit']")
time.sleep(10)
#driver.implicitly_wait(10)
but_click("//*[@id='1f3d8d91dba95740f16ef1951d961902']")
time.sleep(2)
###########################################################
#Form Population
field_fill("//*[@id='sp_formfield_sh_provide_business_justification']","Lorem Ipsum")
field_fill_with_dropdown("//*[@id='s2id_autogen3']","CAINCC-N-B00301")
but_click("//*[@id='sp_formfield_sh_300_server_type']/label[1]/input")
but_click("//*[@id='sp_formfield_sh_1100_is_this_system_a_candidate_for']/label[1]/input")
but_click("//*[@id='sp_formfield_sh_1200_will_this_system_need_to_be_sh']/label[2]/input")
but_click("//*[@id='sp_formfield_sh_1400_is_this_server_part_of_a_tsys']/label[2]/input")
but_click("//*[@id='sp_formfield_sh_1700_it_is_required_that_all_databa']/label[3]/input")
field_fill("//*[@id='sp_formfield_sh_1900_server_primary_role']","Lorem Ipsum")
field_fill("//*[@id='sp_formfield_sh_2000_additional_comments']","Lorem Ipsum")
but_click("//*[@id='sp_formfield_sh_2100_the_recommended_standard_proce']/label[1]/input")
field_fill_with_dropdown("//*[@id='s2id_autogen2']", "TLS -TaCIT")
field_fill("//*[@id='sp_formfield_sh_2400_application_support_email_addr']", "Lorem Ipsum")
field_fill("//*[@id='sp_formfield_sh_2500_application_support_contact_nu']", "Lorem Ipsum")
but_click("//*[@id='sp_formfield_sh_2600_i_confirm_that_i_have_uploaded']")
but_click("//*[@id='sp_formfield_sh_2800_is_this_a_msl_tsystems_proj']/label[2]/input")
but_click("//*[@id='sp_formfield_sh_3000_request_type']/div[4]/label/input")
but_click("//*[@id='submit']")
time.sleep(5)
print("Adding to cart")
add_to_cart=driver.find_element_by_xpath("//*[@id='x537430c9db4af300bd27f9231d96194e']/div/div/div/div/div[2]/div/div[2]/button[1]")
driver.execute_script("arguments[0].click();", add_to_cart)
#try:
# WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH, add_to_cart)))
#except (TimeoutException) as py_ex2:
# print(str(py_ex2))
#
#but_click(add_to_cart)
#driver.quit() | UTF-8 | Python | false | false | 4,897 | py | 4 | rpa_demo_dd_loop.py | 3 | 0.652644 | 0.61895 | 0 | 116 | 41.224138 | 160 |
Gongzq5/leetcode-helper | 10,995,116,316,827 | f56b1ec66f6d5802c8ebe74686791e99ad5801c9 | 6592fd6eb192f0369c6b621582c3a6f801c779ad | /leetcode/__init__.py | ece30b3a8b4e4963d61979af9763fe25def49cf7 | [
"MIT"
]
| permissive | https://github.com/Gongzq5/leetcode-helper | c7d48853b9de33e000fed3572c0d3f9dc0a05094 | 2c44fca7e988da2fd8fb8c1851cbba01032ad8df | refs/heads/main | 2023-07-10T09:01:39.955342 | 2023-06-29T07:38:32 | 2023-06-29T07:38:32 | 373,733,773 | 0 | 1 | MIT | false | 2023-06-29T07:38:33 | 2021-06-04T05:46:26 | 2021-06-04T06:49:39 | 2023-06-29T07:38:32 | 147 | 0 | 1 | 0 | Jupyter Notebook | false | false | from . import array
from . import list
from . import reporter
from . import tree
from . import simple_drawer
| UTF-8 | Python | false | false | 109 | py | 9 | __init__.py | 7 | 0.761468 | 0.761468 | 0 | 5 | 20.8 | 27 |
dylburger/cracking-the-coding-interview-prep | 10,943,576,672,147 | 4acf87cc881f2bdbb0f4a1f57b33bfc704fa67d5 | 666131d3188db46e214238050437d15fe6244d26 | /balanced-brackets/tests.py | a8606def3e74cf3ab43a49bf407ddbf2df71d730 | [
"MIT"
]
| permissive | https://github.com/dylburger/cracking-the-coding-interview-prep | 2c36258ed28922bf896c8a16d98c19a9c1cccc3f | 14fbf4e798cfee2074f26e96091034c3aa9bfb02 | refs/heads/master | 2020-03-18T18:14:08.890931 | 2018-06-06T18:45:11 | 2018-06-06T18:45:11 | 135,079,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from are_brackets_balanced import is_balanced
def test_single_balanced_case():
assert is_balanced('[]') is True
assert is_balanced('()') is True
assert is_balanced('{}') is True
def test_trivial_unbalanced_case():
assert is_balanced('[') is False
assert is_balanced('{') is False
assert is_balanced('(') is False
def test_three_balanced_brackets():
assert is_balanced('[({})]') is True
def test_complex_balanced_brackets():
assert is_balanced('[({([])})]') is True
def test_unbalanced_mess():
assert is_balanced('[({([[[]]}[]]][]') is False
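# The real implementation lives in are_brackets_balanced.py (not shown in this file);
# a minimal stack-based sketch that would satisfy the tests above could look like:
#
#   PAIRS = {')': '(', ']': '[', '}': '{'}
#
#   def is_balanced(brackets):
#       stack = []
#       for ch in brackets:
#           if ch in '([{':
#               stack.append(ch)
#           elif ch in PAIRS:
#               if not stack or stack.pop() != PAIRS[ch]:
#                   return False
#       return len(stack) == 0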
| UTF-8 | Python | false | false | 582 | py | 22 | tests.py | 15 | 0.640893 | 0.640893 | 0 | 20 | 28.1 | 51 |
caveness/pants | 10,290,741,667,672 | 892f34d402b319b87f54ebf5d80f750c5d2a198c | 83e0cded019d54593ca5978f313835b9097a2859 | /tests/python/pants_test/engine/exp/test_engine.py | c9cf2294390a4816acf33670421349193835e61d | [
"Apache-2.0"
]
| permissive | https://github.com/caveness/pants | 1cb61709b4156a598b18e3fb55621f0de0ffa4aa | 71b7bbbeb61085bb4312727c0f045dbcce19bebc | refs/heads/master | 2021-01-17T04:24:46.045359 | 2016-02-11T20:52:09 | 2016-02-11T20:52:09 | 50,482,636 | 0 | 0 | null | true | 2016-01-27T04:56:30 | 2016-01-27T04:56:30 | 2016-01-23T23:40:06 | 2016-01-27T02:55:22 | 72,376 | 0 | 0 | 0 | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from contextlib import closing, contextmanager
from pants.build_graph.address import Address
from pants.engine.exp.engine import (Engine, LocalMultiprocessEngine, LocalSerialEngine,
SerializationError)
from pants.engine.exp.examples.planners import (ApacheThriftError, Classpath, JavaSources,
setup_json_scheduler)
from pants.engine.exp.scheduler import BuildRequest, Return, SelectNode
class EngineTest(unittest.TestCase):
def setUp(self):
build_root = os.path.join(os.path.dirname(__file__), 'examples', 'scheduler_inputs')
self.scheduler = setup_json_scheduler(build_root)
self.java = Address.parse('src/java/codegen/simple')
def assert_engine(self, engine):
build_request = BuildRequest(goals=['compile'], addressable_roots=[self.java])
result = engine.execute(build_request)
self.assertEqual({SelectNode(self.java, Classpath, None, None): Return(Classpath(creator='javac'))},
result.root_products)
self.assertIsNone(result.error)
@contextmanager
def multiprocessing_engine(self, pool_size=None):
with closing(LocalMultiprocessEngine(self.scheduler, pool_size=pool_size, debug=True)) as e:
yield e
def test_serial_engine_simple(self):
engine = LocalSerialEngine(self.scheduler)
self.assert_engine(engine)
def test_multiprocess_engine_multi(self):
with self.multiprocessing_engine() as engine:
self.assert_engine(engine)
def test_multiprocess_engine_single(self):
with self.multiprocessing_engine(pool_size=1) as engine:
self.assert_engine(engine)
def test_multiprocess_unpickleable(self):
build_request = BuildRequest(goals=['unpickleable'],
addressable_roots=[self.java])
with self.multiprocessing_engine() as engine:
with self.assertRaises(SerializationError):
engine.execute(build_request)
| UTF-8 | Python | false | false | 2,277 | py | 17 | test_engine.py | 11 | 0.703996 | 0.700483 | 0 | 57 | 38.947368 | 104 |
egeromin/Isomorphisms | 10,660,108,863,285 | 5ed475a3b649b87d04a2af98ef6df267242f9068 | c427c8530635293c207b4ddbc8f87cf81036fb07 | /lstm_state/models.py | 8bcc962c3c9045852bb828a9bcd076a10d31fe1f | []
| no_license | https://github.com/egeromin/Isomorphisms | 91fa9aebfd79cba6d9bde1905df79cdf76ee5cb9 | 7aaf754fb3cb1e0ec062142db39f37dee9fff567 | refs/heads/master | 2022-12-02T06:00:25.007757 | 2018-06-18T20:29:46 | 2018-06-18T20:29:46 | 133,087,439 | 0 | 0 | null | false | 2022-11-22T01:06:06 | 2018-05-11T20:50:42 | 2018-06-18T21:10:38 | 2022-11-22T01:06:06 | 78 | 0 | 0 | 21 | Python | false | false | """
Different models to try out for language modelling.
"""
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import ipdb
from lstm_state import config
class RNNLanguageModel:
def __init__(self):
self.should_save_state = False
self.saved_states = []
self.saver = None
self.saver_path = "checkpoints/lstm-state-ckpt"
def set_saver_path(self, saver_path):
self.saver_path = saver_path
def _zero_state(self, batch_size):
raise NotImplementedError
def zero_state(self, batch_size=None):
if batch_size is None:
batch_size = config.batch_size
return self._zero_state(batch_size)
def get_variables(self):
raise NotImplementedError
def _forward(self, inp, state):
raise NotImplementedError
def save_state(self, state):
raise NotImplementedError
def forward(self, inp, state):
output, next_state = self._forward(inp, state)
if self.should_save_state:
self.saved_states.append(self.save_state(next_state))
return output, next_state
def start_saving_state(self):
self.should_save_state = True
self.saved_states.append(self.save_state(self.zero_state()))
def get_saved_states(self):
saved_states = self.saved_states
self.saved_states = []
self.should_save_state = False
return saved_states
def save(self, itn=None):
if self.saver is None:
self.saver = tfe.Saver(self.get_variables())
saver_path = self.saver_path
if itn is not None:
saver_path += "_{}".format(str(itn).zfill(4))
self.saver.save(saver_path)
def restore(self):
if self.saver is None:
if len(self.get_variables()) == 0:
in_ = tf.zeros((1, 256), tf.float32)
self.forward(in_, self.zero_state(1))
self.saver = tfe.Saver(self.get_variables())
self.saver.restore(self.saver_path)
# def _gather_saveables_for_checkpoint(self):
# """Implementing this to checkpoint the variables
# I've defined"""
# return {
# x.name: x for x in self.get_variables()
# }
class SimpleLSTM(RNNLanguageModel):
def __init__(self):
super().__init__()
self.lstm_size = 256
self.lstm = tf.contrib.rnn.BasicLSTMCell(self.lstm_size)
def _zero_state(self, batch_size):
hidden_state = tf.zeros([batch_size, self.lstm_size])
current_state = tf.zeros([batch_size, self.lstm_size])
return hidden_state, current_state
def save_state(self, state):
return tuple(map(lambda x: x.numpy(), state))
def get_variables(self):
return self.lstm.variables
def _forward(self, inp, state):
return self.lstm(inp, state)
class LSTMWithDense(SimpleLSTM):
def __init__(self):
super().__init__()
self.lstm_size = 400
self.output_size = 256
self.dense1 = tf.layers.Dense(self.lstm_size,
activation=tf.nn.relu)
self.lstm = tf.contrib.rnn.BasicLSTMCell(self.lstm_size)
self.dense2 = tf.layers.Dense(self.output_size)
def get_variables(self):
return self.dense1.variables + self.lstm.variables + \
self.dense2.variables
def _forward(self, inp, state):
lstm_inp = self.dense1(inp)
lstm_out, next_state = self.lstm(lstm_inp, state)
out = self.dense2(lstm_out)
return out, next_state
class StackedLSTMWithDense(RNNLanguageModel):
"""
Stack multiple LSTM cells.
"""
def __init__(self, num_layers=2):
super().__init__()
self.lstm_size = 400
self.output_size = 256
self.lstm = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.BasicLSTMCell(self.lstm_size)
for _ in range(num_layers)])
self.dense1 = tf.layers.Dense(self.lstm_size,
activation=tf.nn.relu)
self.dense2 = tf.layers.Dense(self.output_size)
def _zero_state(self, batch_size):
return self.lstm.zero_state(batch_size, tf.float32)
def save_state(self, state):
def get_numpy(s):
return tuple(map(lambda x: x.numpy(), s))
return tuple(map(get_numpy, state))
def get_variables(self):
return self.dense1.variables + self.lstm.variables + \
self.dense2.variables
def _forward(self, inp, state):
lstm_inp = self.dense1(inp)
lstm_out, next_state = self.lstm(lstm_inp, state)
out = self.dense2(lstm_out)
return out, next_state
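# Illustrative usage (a minimal sketch, not part of the original module; it assumes
# TensorFlow eager execution, matching the tf.contrib.eager / tfe usage above):
#   model = SimpleLSTM()
#   state = model.zero_state(batch_size=1)
#   inp = tf.zeros((1, 256), tf.float32)   # same dummy input shape used in restore()
#   out, state = model.forward(inp, state)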
| UTF-8 | Python | false | false | 4,704 | py | 19 | models.py | 13 | 0.595876 | 0.587585 | 0 | 157 | 28.955414 | 68 |
hezhenpan/sylwk04 | 3,246,995,317,272 | 793835b55c2b900284412e1bac9ee8d318964dff | 7f100960fa99e9233b3a7cbd8e9d82ed3a9b0d1f | /shiyanlou/spiders/github.py | 16a3b4da4f5c8bce90bd5a32a1a95d0ec6fcc390 | []
| no_license | https://github.com/hezhenpan/sylwk04 | b401e386289986d15fe9f273161334feabaad129 | 51f348dda4751280b962b0dcf4fc631981c1d004 | refs/heads/master | 2020-03-08T15:55:00.270599 | 2018-04-06T10:03:04 | 2018-04-06T10:03:04 | 128,224,812 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from shiyanlou.items import GithubItem
class GithubSpider(scrapy.Spider):
name = 'github'
allowed_domains = ['github.com']
@property
def start_urls(self):
url_tmpl = 'https://github.com/shiyanlou?page={}&tab=repositories'
return (url_tmpl.format(i) for i in range(1, 5))
def parse(self, response):
for repository in response.css('li.public'):
item = GithubItem()
item['name'] = repository.xpath('.//a[@itemprop="name codeRepository"]/text()').re_first("\n\s*(.*)")
item['update_time'] = repository.xpath('.//relative-time/@datetime').extract_first()
other_url = response.urljoin(repository.xpath('.//a/@href').extract_first())
request = scrapy.Request(other_url, callback=self.parse_other)
request.meta['item'] = item
yield request
def parse_other(self, response):
item = response.meta['item']
for numcount in response.css('ul.numbers-summary'):
item['commits'] = numcount.css('li.commits').xpath('.//a/span/text()').extract_first()
item['branches'] = numcount.xpath('.//li[2]/a/span/text()').extract_first()
item['releases'] = numcount.xpath('.//li[3]/a/span/text()').extract_first()
yield item
| UTF-8 | Python | false | false | 1,354 | py | 1 | github.py | 1 | 0.591581 | 0.587888 | 0 | 38 | 34.552632 | 113 |
gevaertlab/MultiModalBrainSurvival | 14,388,140,489,993 | 788ae8501fb77e966a13f44ca3f3877d3c5d6eff | 35996de59c46726504af25c7c372741f0b150ecf | /5_JointFusion/2_JointFusion_savescore.py | 7da0d3e4a59f4987a90cded44f780b18c8c80366 | []
| no_license | https://github.com/gevaertlab/MultiModalBrainSurvival | 4a0536b2b32eb9034879f1341453e7cf703f2dd0 | af5f6b00e956413d21bcb40905bfd7dc43babec7 | refs/heads/main | 2023-08-03T16:55:07.797302 | 2023-07-12T23:36:21 | 2023-07-12T23:36:21 | 542,352,608 | 8 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #########################################
### MODEL SCORES FOR JOINT FUSION MODEL (FFPE+RNA)
#########################################
### This script fetches the model scores of the multimodal joint fusion model
### - input:
### - 224x224 patches (see 1_WSI2Patches.py)
### - log+z-score transformed RNA values of 12,778 genes (see genes.txt) + config file
### - config file
### - output: model survival predictions per sample
###############################################################################
###############################################################################
### Example command
### $ 2_JointFusion_savescore.py --config "/path/to/config_joint_savescore.json"
###################################################
###################################################
### Set Environment
####################
from __future__ import print_function
from __future__ import division
import torch.nn as nn
from torch.optim import Adam
import torch.multiprocessing
from torch.utils.data import WeightedRandomSampler, SequentialSampler, RandomSampler
import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import *
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from lifelines.utils import concordance_index
from scipy.special import softmax as scipy_softmax
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import time
import os
import copy
import json
import argparse
from datasets import PatchRNADataset, PatchBagRNADataset
from resnet import resnet50
from models import HistopathologyRNAModel, AggregationModel, AggregationProjectModel, Identity, TanhAttention, CoxLoss, BagHistopathologyRNAModel
from tensorboardX import SummaryWriter
plt.switch_backend('agg')
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
### Functions
###############
def evaluate(model, val_dataloader,device, criterion, mode='val'):
## Validation
model.eval()
output_list = []
wsi_list = []
case_list = []
loss_list = []
survival_months_list = []
vital_status_list = []
for b_idx, batch_dict in enumerate(val_dataloader):
patches = batch_dict['patch_bag'].to(device)
rna_data = batch_dict['rna_data'].to(device)
survival_months = batch_dict['survival_months'].to(device).float()
vital_status = batch_dict['vital_status'].to(device).float()
input_size = patches.size()
wsis = batch_dict['wsi_file_name']
with torch.no_grad():
outputs = model(patches, rna_data)
loss = criterion(outputs.view(-1), survival_months, vital_status)
loss_list.append(loss.item())
output_list.append(outputs.detach().cpu().numpy())
survival_months_list.append(survival_months.detach().cpu().numpy())
vital_status_list.append(vital_status.detach().cpu().numpy())
wsi_list.append(wsis)
case_list.append(batch_dict['case'])
wsi_list = [w for w_b in wsi_list for w in w_b]
case_list = [c for c_b in case_list for c in c_b]
survival_months_list = np.array([s for s_b in survival_months_list for s in s_b])
vital_status_list = np.array([v for v_b in vital_status_list for v in v_b])
output_list = np.concatenate(output_list, axis=0)
wsi_CI, _ = get_survival_CI(output_list, wsi_list, survival_months_list, vital_status_list)
case_CI, pandas_output = get_survival_CI(output_list, case_list, survival_months_list, vital_status_list)
print("{} wsi | CI {:.3f}".format(mode, wsi_CI))
print("{} case | CI {:.3f}".format(mode, case_CI))
val_loss = np.mean(loss_list)
return val_loss, pandas_output
def get_survival_CI(output_list, ids_list, survival_months, vital_status):
ids_unique = sorted(list(set(ids_list)))
id_to_scores = {}
id_to_survival_months = {}
id_to_vital_status = {}
for i in range(len(output_list)):
id = ids_list[i]
id_to_scores[id] = id_to_scores.get(id, []) + [output_list[i, 0]]
id_to_survival_months[id] = survival_months[i]
id_to_vital_status[id] = vital_status[i]
for k in id_to_scores.keys():
id_to_scores[k] = np.mean(id_to_scores[k])
score_list = np.array([id_to_scores[id] for id in ids_unique])
survival_months_list = np.array([id_to_survival_months[id] for id in ids_unique])
vital_status_list = np.array([id_to_vital_status[id] for id in ids_unique])
CI = concordance_index(survival_months_list, -score_list, vital_status_list)
pandas_output = pd.DataFrame({'id': ids_unique, 'score': score_list, 'survival_months': survival_months_list,
'vital_status': vital_status_list})
return CI, pandas_output
def main():
# parse args and load config
args = parser.parse_args()
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
with open(args.config) as f:
config = json.load(f)
if 'flag' in config:
args.flag = config['flag']
if args.flag == "":
args.flag = 'train_{date:%Y-%m-%d %H:%M:%S}'.format(date=datetime.datetime.now())
device = torch.device("cuda:0" if (torch.cuda.is_available() and config['use_cuda']) else "cpu")
num_classes, num_epochs = config['num_classes'], config['num_epochs']
resnet = resnet50(pretrained=config['pretrained'])
aggregator = None
if config['aggregator'] == 'identity':
aggregator = Identity()
elif config['aggregator'] == "attention":
aggregator = TanhAttention(dim=2048)
elif config['aggregator'] == 'transformer':
aggregator = TransformerEncoder(config['transformer_layers'], 2048, config['aggregator_hdim'], 5,
config['aggregator_hdim'], .2, 0)
model_histo = resnet
model_rna = torch.nn.Sequential(
nn.Dropout(),
nn.Linear(12778, 4096),
nn.ReLU(),
nn.Dropout(),
nn.Linear(4096, 2048),
)
combine_mlp = torch.nn.Sequential(nn.Dropout(0.8), nn.Linear(4096, 1))
model = BagHistopathologyRNAModel(model_histo, model_rna, combine_mlp)
if config['model_path'] != "":
model.load_state_dict(torch.load(config['model_path']))
print("Loaded model from checkpoint")
print("Loaded model")
input_size = 224
# create Datasets and DataLoaders
data_transforms = {
'train': transforms.Compose([
transforms.Resize(input_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(64.0 / 255, 0.75, 0.25, 0.04),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
# Create training and validation datasets
image_datasets = {}
image_samplers = {}
print("loading datasets")
image_datasets['train'] = PatchBagRNADataset(patch_data_path=config["data_path"], csv_path=config["train_csv_path"],img_size=config["img_size"],transforms=data_transforms['train'], bag_size=config['train_bag_size'],max_patch_per_wsi=config.get('max_patch_per_wsi_train', 1000))
image_datasets['val'] = PatchBagRNADataset(patch_data_path=config["data_path"], csv_path=config["val_csv_path"],img_size=config["img_size"], bag_size=config['val_bag_size'], transforms=data_transforms['val'],max_patch_per_wsi=config.get('max_patch_per_wsi_val', 1000))
image_datasets['test'] = PatchBagRNADataset(patch_data_path=config["data_path"], csv_path=config["test_csv_path"],img_size=config["img_size"], bag_size=config['val_bag_size'], transforms=data_transforms['val'],max_patch_per_wsi=config.get('max_patch_per_wsi_val', 1000))
print("loaded datasets")
image_samplers['train'] = RandomSampler(image_datasets['train'])
image_samplers['val'] = SequentialSampler(image_datasets['val'])
image_samplers['test'] = SequentialSampler(image_datasets['test'])
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=int(config['batch_size']) , sampler=image_samplers[x],num_workers=10) for x in ['train', 'val', 'test']}
print("Initialized Datasets and Dataloaders...")
# Send the model to GPU
model = model.to(device)
criterion = CoxLoss()
for dataset in ['train','val','test']:
print("Evaluation for dataset : {}".format(dataset))
_,output=evaluate (model,dataloaders_dict[dataset],device,criterion, mode='val')
#outname = config['flag'].split("_")[-1]
outname = config['flag']
if 'cv' in outname:
output.to_csv(config['output_path']+config['model_path'].split("/")[-1]+"_joint_"+dataset+"_"+outname+"_df.csv")
else:
output.to_csv(config['output_path']+config['model_path'].split("/")[-1]+"_joint_"+dataset+"_df.csv")
### Input arguments
####################
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='config.json', help='configuration json file')
parser.add_argument("--seed",type=int,default=1111)
### MAIN
##########
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 9,474 | py | 29 | 2_JointFusion_savescore.py | 17 | 0.624974 | 0.609457 | 0 | 236 | 39.144068 | 281 |
yyjhao/Seer-recognizer | 592,705,534,613 | 4e34b5efdf1e3ad2ea16cf4c93ac7cd42afa1967 | 8e91545f5fabd57ccb98fe55f483a4ba3d15dba5 | /code/bg.py | 7283978341982df686f70c927825bcccfdc34769 | []
| no_license | https://github.com/yyjhao/Seer-recognizer | 69786f0c4ace83f84b14f44041ea3440cbd39208 | 003ad3e3302fd3b917b9711ff6c749d479490f61 | refs/heads/master | 2020-05-30T08:43:29.341950 | 2015-11-13T05:14:12 | 2015-11-13T05:14:12 | 43,367,022 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # bg.py
# Performs background extraction on a video by averaging the
# pixels of every frame in the video
import cv2
import numpy as np
import sys
def main():
# Read video file from command line argument
filename = sys.argv[1]
cap = cv2.VideoCapture(filename)
# Get and print video properties
frame_width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
frame_count = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
print "Frame width:", frame_width
print "Frame height:", frame_height
print "FPS:", fps
print "Frame count:", frame_count
# Read video frames and perform averaging
_, img = cap.read()
avgImg = np.float32(img)
for fr in range(1, frame_count):
_, img = cap.read()
avgImg = (img + fr * avgImg) / (fr + 1)
# Convert into uint8 image
normImg = cv2.convertScaleAbs(avgImg)
# Save the new image
tokens = filename.split('.')
output_file = tokens[0] + '_background.jpg'
cv2.imwrite(output_file, normImg)
# Close video file
cap.release()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,199 | py | 15 | bg.py | 12 | 0.6397 | 0.627189 | 0 | 46 | 25.065217 | 64 |
caelus95/MantaOcean | 16,398,185,146,863 | 2e80f50897f78006385a66a7b1cd909a9ad1beb6 | 3d4a3bebf614086cce8a22510d8c27c0bea52f92 | /Ori/Read_nc/untitled5.py | 73b6ad175aa599ae831d83e06750c89ca9afe472 | []
| no_license | https://github.com/caelus95/MantaOcean | dc031518051daac9b718b4c7664a057a956475f8 | dbc5774f6ecd949a8d8f58c66d0101f816b90dc9 | refs/heads/master | 2023-06-18T22:00:26.353952 | 2021-06-29T13:25:48 | 2021-06-29T13:25:48 | 365,965,350 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 21:10:56 2021
@author: caelus
"""
import pandas as pd
import numpy as np
data = pd.read_csv('/home/caelus/wormhole/mix_elnino.csv')
a,b = data.shape
ep_index = data.values.reshape(-1)
np.save('/home/caelus/dock_1/Working_hub/DATA_dep/Kuroshio/latest/sigs/mix_index',ep_index)
| UTF-8 | Python | false | false | 358 | py | 117 | untitled5.py | 114 | 0.687151 | 0.642458 | 0 | 19 | 17.736842 | 91 |
VSevagen/Chat-Application-using-TCP-Socket-and-Threads | 807,453,869,108 | 40ad8e7eb87b8139b06c8e3c981ac166c49492e3 | 080ec6458420864bf8958f0761fd6267c8112cff | /Client.py | 7260ff82f01d1f7d5c030ab3f8f2dda63a5f1cd7 | []
| no_license | https://github.com/VSevagen/Chat-Application-using-TCP-Socket-and-Threads | 7dbb57be4f93913bae16606949c945989dcafcd4 | 594306bffc585bd4618f1df6783be8a2f3edfc6d | refs/heads/master | 2023-04-20T02:31:26.063224 | 2021-05-11T15:57:40 | 2021-05-11T15:57:40 | 358,806,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Script for Tkinter GUI chat client. """
import tkinter
from tkinter.filedialog import askopenfilename
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def receive():
""" Handles receiving of messages. """
while True:
try:
msg = sock.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError:
break
def send(event=None):
""" Handles sending of messages. """
msg = var.get()
var.set("") # Clears input field.
sock.send(bytes(msg, "utf8"))
if msg == "#quit":
sock.close()
top.quit()
def on_closing(event=None):
""" This function is to be called when the window is closed. """
var.set("#quit")
send()
def smiley_button_tieup(event=None):
""" Function for smiley button action """
var.set(":)")
msg = var.get() # A common smiley character
sock.send(bytes(msg, "utf8"))
var.set("")
def sad_button_tieup(event=None):
""" Function for smiley button action """
var.set(":(") # A common smiley character
msg = var.get()
sock.send(bytes(msg, "utf8"))
var.set("")
def on_enter(e):
quit_button['background'] = 'red'
def on_leave(e):
quit_button['background'] = 'white'
def send_file(event=None):
filename = askopenfilename()
file = open(filename, "rb")
SendData = file.read(1024)
sock.send(SendData)
top = tkinter.Tk()
top.title("Instant Messenger v1.0.0")
messages_frame = tkinter.Frame(top)
var = tkinter.StringVar()
var.set("")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=70, yscrollcommand=scrollbar.set, bg="darkgray")
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack()
messages_frame.pack()
button_label = tkinter.Label(top, text="Enter Message:", background="white")
button_label.pack()
entry_field = tkinter.Entry(top, textvariable=var, foreground="Black")
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send, background="white")
send_button.pack(side='left', ipadx=100)
smiley_button = tkinter.Button(top, text=":)", command=smiley_button_tieup)
smiley_button.pack(side='left')
sad_button = tkinter.Button(top, text=":(", command=sad_button_tieup)
sad_button.pack(side='left')
quit_button = tkinter.Button(top, text="Quit", command=on_closing)
quit_button.bind("<Enter>", on_enter)
quit_button.bind("<Leave>", on_leave)
quit_button.pack(side='left', ipadx=100)
sendFile_button = tkinter.Button(top, text="Send File", command=send_file)
sendFile_button.pack(side='left')
top.protocol("WM_DELETE_WINDOW", on_closing)
BUFSIZ = 1024
ADDR = ('localhost', 5000)
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution. | UTF-8 | Python | false | false | 2,939 | py | 3 | Client.py | 1 | 0.676421 | 0.666553 | 0 | 101 | 28.108911 | 108 |
DenGamleSkurk/advent-of-code | 18,433,999,652,616 | 490c002ef3f74d19ec7899953e681dedfefea2ea | d5dcc8916b89b44a2d98eb257d12411e5cf2fe64 | /2016/day8_1.py | 8a541ee83eb4ae479bf0ecfafe8f230a9c89ee11 | []
| no_license | https://github.com/DenGamleSkurk/advent-of-code | 7930f211c44b04dc4106a8c0413ea4fe173b7c21 | 0b1911f8ee2de823534315f7fc200a4a814b8d1d | refs/heads/master | 2020-03-29T19:26:27.471210 | 2018-09-26T11:03:53 | 2018-09-26T11:03:53 | 150,263,312 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
a = np.zeros((6, 50))
for line in open("input8.txt"):
words = line.split()
if words[0] == "rect":
inst = words[1].split("x")
x = int(inst[0]) #col
y = int(inst[1]) #row
tmp = np.ones((y, x))
a[0:y, 0:x] = tmp
else:
if words[1] == "row":
inst = words[2].split("=")
row = int(inst[1])
amount = int(words[-1])
a[row, :] = np.roll(a[row, :], amount)
else:
inst = words[2].split("=")
col = int(inst[1])
amount = int(words[-1])
a[:, col] = np.roll(a[:, col], amount)
print(np.count_nonzero(a))
| UTF-8 | Python | false | false | 675 | py | 17 | day8_1.py | 17 | 0.435556 | 0.41037 | 0 | 26 | 24.961538 | 50 |
JHUAPL/meta-system | 14,070,312,879,022 | 5dc41f2629022fe88913b661a4935c09a5970762 | 92bf028ca91bf095355c7f3fbc8062099b7645d7 | /system/models/job_manager.py | de5692a8c3fb4a9274146f0f9451bcf9ac51ab59 | [
"Apache-2.0"
]
| permissive | https://github.com/JHUAPL/meta-system | db6b116a782b0aa33bbad175ef2736154350f084 | d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03 | refs/heads/main | 2023-01-10T00:09:31.167176 | 2020-11-13T01:13:58 | 2020-11-13T01:13:58 | 307,448,138 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
import queue
from enum import Enum
class JobManager:
queue = queue.Queue() # queue for multi-threading (not multi-processing)
running = False
class JobType(Enum):
SIMULATION = "Simulation"
CLASSIFICATION = "Classification"
EVALUATION = "Evaluation"
class JobStatus(Enum):
QUEUED = "Queued"
PROCESSING = "Processing"
COMPLETED = "Completed"
FAILED = "Failed"
CANCELLED = "Cancelled"
class JobMode(Enum):
REAL_READS = "Real"
SIMULATED_READS = "Simulated"
| UTF-8 | Python | false | false | 1,385 | py | 184 | job_manager.py | 75 | 0.645487 | 0.639711 | 0 | 44 | 30.477273 | 77 |
chengtachu/MUSE_PSM | 197,568,526,587 | 70d22a2213e4956004ebcbc59f752ce57f68e818 | 3146cacb8c03bb1588386408ffccc5f87a190016 | /PSM_main.py | c0acc4939848cd72b5b20943d42162437c7dd262 | [
"MIT"
]
| permissive | https://github.com/chengtachu/MUSE_PSM | 37a18e74d225ba328e190c9b99ff3ce4e97705e1 | 2dd9f20a56b270eda5a7c02f53ff1d71921e5c76 | refs/heads/master | 2023-02-10T20:51:13.048686 | 2021-01-05T15:40:24 | 2021-01-05T15:40:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import time
import cls_instance
def _MainFunction(instance):
# import external data from other module (Data/Input)
instance.update_MUSEInput()
print('{:f}'.format(time.time() - start_time) + " external data loaded")
# run the models
instance.run()
if __name__ == '__main__':
start_time = time.time()
# create instance and import instance configs (Data/nstanceConfig.xlsx)
instance = cls_instance.Instance()
print('{:f}'.format(time.time() - start_time) + " instance created")
    # import region assumptions (Data/ExoAssumption)
instance.get_RegionAssumption()
# import country assumptions (Data/ExoAssumption)
instance.get_CountryAssumption()
print('{:f}'.format(time.time() - start_time) + " region and country assumption loaded")
    # import market settings (Data/ExoAssumption)
instance.get_MarketSettings()
# import zone assumptions (Data/ExoAssumption)
instance.get_ZoneAssumption()
print('{:f}'.format(time.time() - start_time) + " market and zone data loaded")
# flag for iterative execution, set it false to stop program
bContinune = True
while( bContinune == True):
        # first run with base year, then continue until bContinune is set to False
_MainFunction(instance)
sNewStartYear = input("New foresight start year: ") # get the first time period of current iteration
if sNewStartYear in str(instance.iAllYearSteps_YS): # input should be within defined time period
instance.iForesightStartYear = int(sNewStartYear) # update the first time period in current iteration
# update modelling year steps and start index
instance.get_FSYearSteps()
else:
print("input year not in selected year steps.")
bContinune = False
| UTF-8 | Python | false | false | 1,855 | py | 266 | PSM_main.py | 20 | 0.667385 | 0.666846 | 0 | 56 | 32.035714 | 115 |
DOAJ/doaj | 13,134,010,020,375 | 4cc5f553093e4af677844090f43ff09b18fe8ec8 | 50db96c78ed909fe3035215a5975554bdbdafcc0 | /portality/migrate/2957_heloise/remove_heloise_policy.py | 25b4b448d3a452e52dcebb7ea47d0e4260a8f49d | [
"Apache-2.0"
]
| permissive | https://github.com/DOAJ/doaj | fc5a53011d5d0042b4d86e0d721561e5cb04c66e | b441932e93a114129539abe4ce79221bd4c7e970 | refs/heads/develop | 2023-08-23T03:21:09.875609 | 2023-08-21T15:34:23 | 2023-08-21T15:34:23 | 14,500,626 | 56 | 17 | Apache-2.0 | false | 2023-09-12T15:48:44 | 2013-11-18T18:07:31 | 2023-07-21T16:59:02 | 2023-09-12T15:48:42 | 213,439 | 51 | 17 | 23 | Python | false | false | import csv
from copy import deepcopy
from portality.models import Journal
from portality.settings import BASE_FILE_PATH
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out", help="output file path")
args = parser.parse_args()
if not args.out:
args.out = "out.csv"
# print("Please specify an output file path with the -o option")
# parser.print_help()
# exit()
with open(args.out, "w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(['Journal ID', 'Old Deposit Policy', "New Deposit Policy", "Has Deposit Policy"])
f = open(BASE_FILE_PATH + "/migrate/2957_heloise/journalID-for-heloise-removal.txt")
ids = f.readlines()
for i in ids:
j = Journal.pull(i.rstrip('\n'))
if j:
bib = j.bibjson()
old_deposit_policy = deepcopy(bib.deposit_policy)
deposit_policy = bib.deposit_policy
if 'Héloïse' in deposit_policy:
deposit_policy.remove('Héloïse')
if len(deposit_policy) == 0:
bib.has_deposit_policy = False
j.save()
writer.writerow([j.id, old_deposit_policy, deposit_policy, bib.has_deposit_policy])
f.close()
| UTF-8 | Python | false | false | 1,371 | py | 1,161 | remove_heloise_policy.py | 589 | 0.570593 | 0.566203 | 0 | 40 | 33.175 | 105 |
pendulumswing/wonderboard | 14,207,751,837,462 | 5257c61100fae6e18f9690ff6e4c6d02eb11f69c | 56865ef15f7ca6cc7962dae67980b8521420c967 | /app/api/board_users.py | 6a73b921bfcde7ee164e7be268fccf53f3ff90ae | [
"MIT"
]
| permissive | https://github.com/pendulumswing/wonderboard | 805a3f382c9acf0d162a8867539990915643fed4 | 60d55a1a70deb5d67ee442e9c9028b9d61d06aa8 | refs/heads/main | 2023-01-09T00:28:57.617377 | 2022-10-26T22:02:48 | 2022-10-26T22:02:48 | 307,538,686 | 0 | 0 | null | true | 2020-10-27T00:10:53 | 2020-10-27T00:10:52 | 2020-10-13T20:16:15 | 2020-10-25T23:13:02 | 12,705 | 0 | 0 | 0 | null | false | false | from .resources import *
@api_rest.route('/board_users')
class BoardUsers(Resource):
# Get all board_users
def get(self):
return find_all('board_users'), 200
# Create board_user
def post(self):
data = request.json
conn = get_connection()
with conn.cursor() as cursor:
cursor.execute(f"""INSERT INTO
board_users (board, "user")
VALUES ('{data['board']}',
'{data['user']}')
RETURNING id;""")
conn.commit()
id = cursor.fetchone()[0]
result = find_one('board_users', id)
return result, 201
@api_rest.route('/board_users/<int:resource_id>')
class BoardUser(Resource):
# Get board_user by id
def get(self, resource_id):
return find_one('board_users', resource_id), 200
# Delete board_user by id
def delete(self, resource_id):
return delete_one('board_users', resource_id)
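# Illustrative requests against the routes above (hypothetical ids and payload):
#   POST   /board_users    with JSON {"board": 1, "user": 2}  -> 201, the new board_user
#   GET    /board_users/5  -> 200, the board_user with id 5
#   DELETE /board_users/5  -> removes the board_user with id 5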
| UTF-8 | Python | false | false | 1,023 | py | 50 | board_users.py | 20 | 0.533724 | 0.523949 | 0 | 37 | 26.648649 | 56 |
Will-Murphy/dp-address-project | 13,554,916,816,356 | f3068cf3e50878b737c4ca31d9336b27bc9595dc | 748f7c58e0c74a676a01678b9db83b2956b5d7e5 | /src/main_batch.py | 78cefb8ea6e7ceb51a8573aa0e880501a96777f4 | []
| no_license | https://github.com/Will-Murphy/dp-address-project | a096eeeda36bc08993d26a95897bad2a0192df6f | 0ac5a49d527580c375726241a804ae306ffd11f6 | refs/heads/master | 2022-12-20T17:22:19.955622 | 2020-09-18T15:00:02 | 2020-09-18T15:00:02 | 231,607,607 | 2 | 0 | null | false | 2022-12-08T11:26:15 | 2020-01-03T14:46:09 | 2020-09-18T15:00:28 | 2022-12-08T11:26:14 | 2,306 | 2 | 0 | 3 | Python | false | false | import argparse
import sys
from services.open_cage_address_service import OpenCageAddressService
from services.smarty_address_service import SmartyAddressService
from utilities import batch_io
"""
Entry point for batch processing of addresses. It produces output to a CSV file.
See the README for usage details.
"""
def run(args=None):
address_service = SmartyAddressService()
address_service.load_config(args['config'], args['usage'])
address_service_2 = OpenCageAddressService()
address_service_2.load_config(args['config'], args['usage'])
if int(args['options']) == 0:
print(f'< using {type(address_service).__name__} ' \
'for address validation and forward geocoding >')
input_address_list = batch_io.read_address_input(args["infile"])
validated_address_list = address_service.validate(args, input_address_list)
processed_address_list = address_service.forward_geocode(args,
validated_address_list)
batch_io.write_general_csv_output(processed_address_list, args['outfile'] )
elif int(args['options']) == 1:
print(f'< using {type(address_service).__name__} for address validation >')
input_address_list = batch_io.read_address_input(args["infile"])
processed_address_list = address_service.validate(args, input_address_list)
batch_io.write_validation_csv_output(processed_address_list, args['outfile'] )
elif int(args['options']) == 2:
print(f'< using {type(address_service).__name__} for forward geocoding >')
input_address_list = batch_io.read_address_input(args["infile"])
processed_address_list = address_service.forward_geocode(args,
input_address_list)
batch_io.write_forward_geocode_csv_output(processed_address_list, args['outfile'] )
elif int(args['options']) == 3:
print(f'< using {type(address_service_2).__name__} for reverse geocoding >')
input_coordinate_list = batch_io.read_coordinate_input(args["infile"])
processed_address_list = address_service_2.reverse_geocode(args,
input_coordinate_list)
batch_io.write_reverse_geocode_csv_output(processed_address_list, args['outfile'] )
else:
print("options parameter takes number 0-3")
if __name__ == '__main__':
# Define available arguments
arg_parser = argparse.ArgumentParser(description="provider address handler")
arg_parser.add_argument('--config', nargs='?', default=None, required=True,
help='specified output file required')
arg_parser.add_argument('--infile', nargs='?', default=None, required=True,
help='infile csv file with provider address data')
arg_parser.add_argument('--outfile', nargs='?', default=None, required=True,
help='specified output file required')
arg_parser.add_argument('--options', nargs='?', default=0, required=False,
help='options to choose what program outputs')
# Get variables from the arguments
args = vars(arg_parser.parse_args(sys.argv[1:]))
args['usage'] = 'batch'
print(f'args: {args}')
run(args)
| UTF-8 | Python | false | false | 3,320 | py | 19 | main_batch.py | 10 | 0.636446 | 0.632831 | 0 | 75 | 43.266667 | 91 |
svpcom/TxOauth2 | 2,886,218,060,493 | c49dcaa7e2f44a4bfb4066352928298ee707f545 | a3881c457d4de43c5fe8b586fb424fad08746bc0 | /txoauth2/errors.py | cc0ae59936028bda46a5fd9be38d6c219e265946 | [
"MIT"
]
| permissive | https://github.com/svpcom/TxOauth2 | 7c15c64604b6f929f2913affa4941a4bc64225f1 | 431f728f18a6d3a2665823bdf34a5d1294df03ed | refs/heads/master | 2021-08-19T04:00:15.969848 | 2017-11-24T17:15:59 | 2017-11-24T17:15:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) Sebastian Scholz
# See LICENSE for details.
import json
import logging
try:
from urllib import urlencode
except ImportError:
# noinspection PyUnresolvedReferences
from urllib.parse import urlencode
from twisted.web.server import NOT_DONE_YET
from txoauth2.util import addToUrl
OK = 200
BAD_REQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
INTERNAL_SERVER_ERROR = 500
SERVICE_UNAVAILABLE = 503
class OAuth2Error(object):
"""
Represents an OAuth2 error. This is not a Python exception and cannot be raised.
    It is intended to return a JSON description of the error and to set the response
    code of the request via the generate method, to comply with the OAuth2 specification.
"""
message = None
detail = None
errorUri = None
code = BAD_REQUEST
_logger = logging.getLogger('txOauth2')
def __init__(self, code, message, detail, errorUri=None):
self.code = code
self.message = message
self.detail = detail
self.errorUri = errorUri
def _generateErrorBody(self):
error = {'error': self.message}
if self.detail is not None:
error['error_description'] = self.detail
if self.errorUri is not None:
error['error_uri'] = self.errorUri
return error
def generate(self, request):
"""
Set the response code of the request and return a string representing
the error, which can be written to the request.
:param request: The request.
:return: A string representing the error.
"""
request.setResponseCode(self.code)
request.setHeader('Content-Type', 'application/json;charset=UTF-8')
request.setHeader('Cache-Control', 'no-store')
request.setHeader('Pragma', 'no-cache')
result = json.dumps(self._generateErrorBody()).encode('utf-8')
self._logger.debug('OAuth2 error: {msg}'.format(msg=result))
return result
class AuthorizationError(OAuth2Error):
"""
    Represents an error that can occur during authorization. The OAuth2 specification says
    that these errors should be sent to the redirection url with the error details
encoded into the url parameters of the redirect.
"""
state = None
def __init__(self, code, message, detail, errorUri=None, state=None):
super(AuthorizationError, self).__init__(code, message, detail, errorUri)
self.state = state
def _generateErrorBody(self):
error = super(AuthorizationError, self)._generateErrorBody()
if self.state is not None:
error['state'] = self.state
return error
def generate(self, request, redirectUri=None, errorInFragment=False):
"""
If a redirectUri is given, the request is redirected to the url with the error details
encoded into the url parameter. Otherwise it behaves like generate in OAuth2Error.
:param request: The request.
:param redirectUri: An optional redirect uri.
        :param errorInFragment: Whether or not the error should be sent in the query or fragment.
:return: NOT_DONE_YET or a string representing the error.
"""
if redirectUri is None:
return super(AuthorizationError, self).generate(request)
else:
request.setResponseCode(self.code)
errorParameter = self._generateErrorBody()
self._logger.debug('OAuth2 error: {msg}'.format(msg=errorParameter))
for key, value in errorParameter.items():
if not (isinstance(value, str) or isinstance(value, bytes)):
errorParameter[key] = value.encode('utf-8') # For Python 2 unicode strings
destination = 'fragment' if errorInFragment else 'query'
request.redirect(addToUrl(redirectUri, **{destination: errorParameter}))
request.finish()
return NOT_DONE_YET
class OAuth2RequestError(OAuth2Error):
""" An error that happens during a request to a protected resource. """
_wwwAuthenticateContent = ''
scope = []
def __init__(self, code, message, detail, scope, errorUri=None, addDetailsToHeader=True):
super(OAuth2RequestError, self).__init__(code, message, detail, errorUri)
self.scope = scope
if addDetailsToHeader:
self._wwwAuthenticateContent += ',scope="' + ' '.join(scope) + '"'
self._wwwAuthenticateContent += ',error="' + message + '"'
self._wwwAuthenticateContent += ',error_description="' + detail + '"'
if errorUri is not None:
self._wwwAuthenticateContent += ',error_uri="' + errorUri + '"'
def _generateErrorBody(self):
body = super(OAuth2RequestError, self)._generateErrorBody()
body['scope'] = self.scope[0] if len(self.scope) == 1 else self.scope
return body
def generate(self, request):
content = 'Bearer realm="{realm}"'.format(realm=request.prePathURL())\
+ self._wwwAuthenticateContent
request.setHeader('WWW-Authenticate', content)
return super(OAuth2RequestError, self).generate(request)
class UnauthorizedOAuth2Error(OAuth2Error):
""" Error during a request to the token resource without valid client credentials. """
def __init__(self, code, message, detail, errorUri=None):
super(UnauthorizedOAuth2Error, self).__init__(code, message, detail, errorUri)
def generate(self, request):
authorizationHeader = request.getHeader(b'Authorization')
if authorizationHeader is not None:
authType = authorizationHeader.strip().split(b' ', 1)[0]
request.setHeader(b'WWW-Authenticate',
authType + b' realm="' + request.prePathURL() + b'"')
return super(UnauthorizedOAuth2Error, self).generate(request)
class MissingParameterError(AuthorizationError):
def __init__(self, name=None, state=None):
if name is None:
message = 'A required parameter was missing from the request'
else:
message = 'Request was missing the \'{name}\' parameter'.format(name=name)
super(MissingParameterError, self).__init__(BAD_REQUEST, 'invalid_request',
message, state=state)
class InvalidParameterError(AuthorizationError):
def __init__(self, name=None, state=None):
if name is None:
message = 'A required parameter was invalid'
else:
message = 'The parameter \'{name}\' is invalid'.format(name=name)
super(InvalidParameterError, self).__init__(BAD_REQUEST, 'invalid_request',
message, state=state)
class InsecureConnectionError(AuthorizationError):
def __init__(self, state=None):
message = 'OAuth 2.0 requires requests over HTTPS'
super(InsecureConnectionError, self).__init__(BAD_REQUEST, 'invalid_request',
message, state=state)
class UnsupportedResponseTypeError(AuthorizationError):
def __init__(self, responseType, state=None):
message = 'Obtaining an authorization code using this method ' \
'is not supported: ' + responseType
super(UnsupportedResponseTypeError, self).__init__(
BAD_REQUEST, 'unsupported_response_type', message, state=state)
class ServerError(AuthorizationError):
def __init__(self, state=None):
message = 'An unexpected condition was encountered and the request could not get fulfilled.'
super(ServerError, self).__init__(
SERVICE_UNAVAILABLE, 'server_error', message, state=state)
class TemporarilyUnavailableError(AuthorizationError):
def __init__(self, state=None):
message = 'The request could not be handled due to a temporary overloading or maintenance.'
super(TemporarilyUnavailableError, self).__init__(
BAD_REQUEST, 'temporarily_unavailable', message, state=state)
class MultipleParameterError(AuthorizationError):
def __init__(self, parameter=None, state=None):
message = 'The request contained a duplicate parameter'
if parameter is not None:
message += ': ' + parameter
super(MultipleParameterError, self).__init__(
BAD_REQUEST, 'invalid_request', message, state=state)
class MalformedParameterError(AuthorizationError):
def __init__(self, name, state=None):
message = 'The parameter \'{name}\' was malformed'.format(name=name)
super(MalformedParameterError, self).__init__(
BAD_REQUEST, 'invalid_request', message, state=state)
class UnauthorizedClientError(AuthorizationError):
def __init__(self, grantType=None, state=None):
message = 'The authenticated client is not authorized to use this authorization grant type'
if grantType is not None:
message += ': ' + grantType
super(UnauthorizedClientError, self).__init__(
BAD_REQUEST, 'unauthorized_client', message, state=state)
class InvalidRedirectUriError(OAuth2Error):
def __init__(self):
message = 'Invalid redirection URI'
super(InvalidRedirectUriError, self).__init__(BAD_REQUEST, 'invalid_request', message)
class InvalidClientIdError(UnauthorizedOAuth2Error):
def __init__(self):
message = 'Invalid client_id'
super(InvalidClientIdError, self).__init__(UNAUTHORIZED, 'invalid_client', message)
class NoClientAuthenticationError(UnauthorizedOAuth2Error):
def __init__(self):
message = 'The request was missing client authentication'
super(NoClientAuthenticationError, self).__init__(UNAUTHORIZED, 'invalid_client', message)
class InvalidClientAuthenticationError(UnauthorizedOAuth2Error):
def __init__(self):
super(InvalidClientAuthenticationError, self).__init__(
UNAUTHORIZED, 'invalid_client', 'The client could not get authenticated.')
class InvalidTokenError(OAuth2Error):
def __init__(self, tokenType):
message = 'The provided {type} is invalid'.format(type=tokenType)
super(InvalidTokenError, self).__init__(BAD_REQUEST, 'invalid_grant', message)
class DifferentRedirectUriError(OAuth2Error):
def __init__(self):
message = 'The redirect_uri does not match the ' \
'redirection URI used in the authorization request'
super(DifferentRedirectUriError, self).__init__(BAD_REQUEST, 'invalid_grant', message)
class InvalidScopeError(AuthorizationError):
def __init__(self, scope, state=None):
if isinstance(scope, list):
scope = ' '.join(scope)
if isinstance(scope, bytes):
scope = scope.decode('utf-8', errors='replace')
message = 'The provided scope is invalid: ' + scope
super(InvalidScopeError, self).__init__(BAD_REQUEST, 'invalid_scope', message, state=state)
class UnsupportedGrantTypeError(OAuth2Error):
def __init__(self, grantType=None):
message = 'The authorization grant type is not supported'
if grantType is not None:
message += ': ' + grantType
super(UnsupportedGrantTypeError, self).__init__(
BAD_REQUEST, 'unsupported_grant_type', message)
class MultipleClientCredentialsError(OAuth2Error):
def __init__(self):
super(MultipleClientCredentialsError, self).__init__(
BAD_REQUEST, 'invalid_request', 'The request contained multiple client credentials')
class MultipleClientAuthenticationError(OAuth2Error):
def __init__(self):
message = 'The request utilized more than one mechanism for authenticating the client'
super(MultipleClientAuthenticationError, self).__init__(
BAD_REQUEST, 'invalid_request', message)
class MalformedRequestError(OAuth2Error):
def __init__(self, msg=None):
message = 'The request was malformed'
if msg is not None:
message += ': ' + msg
super(MalformedRequestError, self).__init__(BAD_REQUEST, 'invalid_request', message)
class UserDeniesAuthorization(AuthorizationError):
def __init__(self, state=None):
message = 'The resource owner denied the request'
super(UserDeniesAuthorization, self).__init__(OK, 'access_denied', message, state=state)
class MissingTokenError(OAuth2RequestError):
def __init__(self, scope):
message = 'No access token provided'
super(MissingTokenError, self).__init__(
UNAUTHORIZED, 'invalid_request', message, scope, addDetailsToHeader=False)
class InvalidTokenRequestError(OAuth2RequestError):
def __init__(self, scope):
message = 'The access token is invalid'
super(InvalidTokenRequestError, self).__init__(
UNAUTHORIZED, 'invalid_token', message, scope)
class InsufficientScopeRequestError(OAuth2RequestError):
def __init__(self, scope):
message = 'The request requires higher privileges than provided by the access token'
super(InsufficientScopeRequestError, self).__init__(
FORBIDDEN, 'insufficient_scope', message, scope)
class MultipleTokensError(OAuth2RequestError):
def __init__(self, scope):
message = 'The request contained multiple access tokens'
super(MultipleTokensError, self).__init__(BAD_REQUEST, 'invalid_request', message, scope)
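# Illustrative usage in a resource handler (a sketch, not part of the library itself;
# 'request' is assumed to be a twisted.web request object):
#   err = MissingParameterError('client_id')
#   body = err.generate(request)  # sets the response code and headers
#   request.write(body)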
| UTF-8 | Python | false | false | 13,423 | py | 6 | errors.py | 6 | 0.659614 | 0.654995 | 0 | 331 | 39.55287 | 100 |
CamiloMeassiMacoris/20_Exercicios_Python | 10,977,936,423,977 | 5f070cfa94c7353d8d28dd8f8457ca178b084503 | c543bc74ea5199cb1df40f374cb7fd025e2f6bf9 | /17.py | 12aeca77366ded1178d28d5e88d77f4725b75d95 | []
| no_license | https://github.com/CamiloMeassiMacoris/20_Exercicios_Python | ac546675045982f761502a9286a00df5d73ab16b | 07a5ed2becb5721ac17b2b121e58b56d0c8b949d | refs/heads/master | 2022-04-12T19:11:16.978508 | 2020-04-02T23:42:37 | 2020-04-02T23:42:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def valorPagamento(valor,dias):
if dias == 0:
return valor
elif dias > 0:
        return valor + valor*3/100 + valor*0.1/100*10 # 3% fine plus 0.1% daily interest, with the day count fixed at 10 here
else:
return valor
prestacoes = []
sair = 1
while sair != 0:
valor = float(input('Digite o valor da prestação: '))
atraso = int(input('Digite os dias de atraso: '))
prestacoes.append(valorPagamento(valor,atraso))
sair = int(input('Adicionar outra prestação (1)\nEncerrar (0)\n>>'))
print(f'Quantidade de prestações: {len(prestacoes)}\nTotal do valor: {sum(prestacoes)}') | UTF-8 | Python | false | false | 564 | py | 20 | 17.py | 20 | 0.630824 | 0.600358 | 0 | 16 | 33 | 88 |
pvoosten/wolfit | 14,139,032,365,625 | 06d375fa6c1fafaf76df7c068f2917819c961007 | c26efd93535fe7d4d3f0f72573a3cd1c1d413153 | /src/wolfit/test_partial_process.py | 40fcd7dc1b4fe3ab5484de2e9bd6370a7b8121ad | []
| no_license | https://github.com/pvoosten/wolfit | d08616e8dd98d382908498982f955bac72a598d0 | d7f3fba5ff5af290f00c6436f03654e32d0d471a | HEAD | 2016-08-04T17:06:22.392634 | 2011-01-22T00:50:14 | 2011-01-22T00:50:14 | 32,882,365 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Sep 22, 2010
@author: Philip van Oosten
@license: GNU GPLv2
'''
import unittest
import wolfit.runners
class BasePartialProcessTest(object):
'''
    A partial process run is an execution of a process in which only a subset of
    the inputs required for a complete run is supplied. You must also select
    which outputs you would like to fetch.
Only the tasks required to fetch the output are executed. If not enough
input is given, the input from a previous run of the process is given. The
tasks that are required to be performed are selected using mark and sweep.
As much as possible, tasks are not executed if the output they would produce
is the same as in the previous execution of the process.
Since partial processes require resources to store previous inputs, there
must be a mechanism to efficiently store and retrieve those resources.
'''
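    # Illustrative sketch of the idea (hypothetical API, for documentation only):
    #   result = process.run(inputs={'a': 1}, outputs=['report'])
    # Only the tasks on the path to 'report' are marked and executed; inputs that
    # are not supplied fall back to the values stored from the previous run.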
def _test_run_partial_process(self):
self.fail('Test not implemented')
class SequentialPartialProcessTest(BasePartialProcessTest, unittest.TestCase):
def setUp(self):
self.runner = wolfit.runners.SequentialRunner
class ThreadPoolPartialProcessTest(BasePartialProcessTest, unittest.TestCase):
def setUp(self):
self.runner = wolfit.runners.ThreadPoolRunner()
class ReusableThreadPoolPartialProcessTest(BasePartialProcessTest, unittest.TestCase):
runner = wolfit.runners.ThreadPoolRunner()
def setUp(self):
self.runner = ReusableThreadPoolPartialProcessTest.runner
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| UTF-8 | Python | false | false | 1,669 | py | 14 | test_partial_process.py | 9 | 0.739365 | 0.735171 | 0 | 45 | 36.088889 | 86 |
stuymmillar/SoftDev | 17,214,228,949,321 | 75e038057accc788216974addba31d2f979a9757 | c8164a02c28112ce8aab34fea96d30ce0e86d57f | /17_db-nmcrnch/stu_mean.py | bfded3ad59bb2753933c86df69e465beec1e2c52 | []
| no_license | https://github.com/stuymmillar/SoftDev | 46cef51062e5b3e12ba85666480f200e2251749a | 53630546f25673b06165fd87d75b7eae9abae1b7 | refs/heads/master | 2021-07-22T19:19:24.617960 | 2019-01-06T00:05:59 | 2019-01-06T00:05:59 | 148,509,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Madison - Max Millar, Addison Huang
import sqlite3
import csv
DB_FILE="discobandit.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
def findAvg(stuId):
c.execute("SELECT mark FROM courses WHERE courses.id = '" + str(stuId) + "';")
grades = c.fetchall()
sum = 0
for x in grades:
sum += int(x[0])
sum = int(sum / len(grades))
return sum
def dispStudent(stuId):
avg = findAvg(stuId)
c.execute("SELECT name FROM peeps WHERE peeps.id = '" + str(stuId) + "';")
name = c.fetchall()
name = str(name[0])
name = name[3:len(name) - 3]
print("Id: " + str(stuId) + "|Name: " + name + "|Average: " + str(avg))
def createTable():
c.execute("CREATE TABLE peeps_avg(id INTEGER, avg INTEGER)")
for x in range(1,11):
avg = findAvg(x)
c.execute("INSERT INTO peeps_avg VALUES(" + str(x) + ", " + str(avg) + ")")
def updateTable():
c.execute("DROP TABLE peeps_avg")
c.execute("CREATE TABLE peeps_avg(id INTEGER, avg INTEGER)")
for x in range(1,11):
avg = findAvg(x)
print("INSERT INTO peeps_avg(" + str(x) + ", " + str(avg) + ")")
c.execute("INSERT INTO peeps_avg VALUES(" + str(x) + ", " + str(avg) + ")")
def addCourse(stu_id, course, grade):
print("INSERT INTO courses VALUES('" + course + "', " + str(stu_id) + ", '" + str(grade) + ")")
c.execute("INSERT INTO courses VALUES('" + course + "', '" + str(stu_id) + "', '" + str(grade) + "')")
updateTable()
dispStudent(1)
#createTable()
updateTable()
addCourse(1, "ballroom", 1000)
db.commit()
db.close()
| UTF-8 | Python | false | false | 1,592 | py | 35 | stu_mean.py | 14 | 0.575377 | 0.563442 | 0 | 56 | 27.428571 | 106 |
Sahanduiuc/TradingStrategyTrial | 13,881,334,341,714 | 2f0ff292b039569bb5aa14afe64261bb396c344c | 52edc4544dad6e6991a9bea382623c5d44b6e44a | /OtherStrategy/BollingerBandsMain.py | 03c7c4369ab72ec8b640519b848d0192d1f70402 | []
| no_license | https://github.com/Sahanduiuc/TradingStrategyTrial | 082407ddc2b78a6eda73fadf1d2e9c187cf634d5 | 7fd62f8b576ffc8f714b590d78fefd8a3a03dc61 | refs/heads/master | 2021-05-22T00:25:44.414180 | 2020-01-04T02:15:36 | 2020-01-04T02:15:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.tools import quandl
from pyalgotrade.technical import bollinger
from pyalgotrade.stratanalyzer import sharpe
from pyalgotrade import broker as basebroker
from pyalgotrade.barfeed import yahoofeed
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
class BollingerBandsStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, calibratedBollingerBandsPeriod):
super(BollingerBandsStrategy, self).__init__(feed, 1000000)
self.__instrument = instrument
self.__bbands = bollinger.BollingerBands(feed[instrument].getCloseDataSeries(), calibratedBollingerBandsPeriod, 2)
def getBollingerBands(self):
return self.__bbands
def onOrderUpdated(self, order):
if order.isBuy():
orderType = "Buy"
else:
orderType = "Sell"
self.info("%s order %d updated - Status: %s" % (
orderType, order.getId(), basebroker.Order.State.toString(order.getState())
))
def onBars(self, bars):
lower = self.__bbands.getLowerBand()[-1]
upper = self.__bbands.getUpperBand()[-1]
if lower is None:
return
shares = self.getBroker().getShares(self.__instrument)
bar = bars[self.__instrument]
if shares == 0 and bar.getClose() < lower:
sharesToBuy = int(self.getBroker().getCash(False) / bar.getClose())
self.info("Placing buy market order for %s shares" % sharesToBuy)
self.marketOrder(self.__instrument, sharesToBuy)
elif shares > 0 and bar.getClose() > upper:
self.info("Placing sell market order for %s shares" % shares)
self.marketOrder(self.__instrument, -1*shares)
def main(plot):
instrument = "n225"
CalibratedBollingerBandsPeriod = 40
feed = yahoofeed.Feed()
feed.addBarsFromCSV(instrument, r".\Data\n225.csv")
strat = BollingerBandsStrategy(feed, instrument, CalibratedBollingerBandsPeriod)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
if plot:
plt = plotter.StrategyPlotter(strat, True, True, True)
plt.getInstrumentSubplot(instrument).addDataSeries("upper", strat.getBollingerBands().getUpperBand())
plt.getInstrumentSubplot(instrument).addDataSeries("middle", strat.getBollingerBands().getMiddleBand())
plt.getInstrumentSubplot(instrument).addDataSeries("lower", strat.getBollingerBands().getLowerBand())
strat.run()
strat.info("Final portfolio value: $%.2f" % strat.getResult())
print("Sharpe ratio: %.2f" % sharpeRatioAnalyzer.getSharpeRatio(0.05))
if plot:
plt.plot()
if __name__ == "__main__":
main(True) | UTF-8 | Python | false | false | 2,812 | py | 23 | BollingerBandsMain.py | 8 | 0.6867 | 0.677454 | 0 | 72 | 38.069444 | 122 |
ChangeXuan/windowsClearFile-Python | 5,858,335,436,416 | 92e2f8f87784ed1dcb50247e29fc0a51c0615a8f | e3a787ce67b22f69b950bb6d75fdf6f025256bb2 | /otherMsg.py | 748bdfba16064d65db1ad74c7afa6384446fd004 | []
| no_license | https://github.com/ChangeXuan/windowsClearFile-Python | 45adfd386139e5d211b1c9f300224bfd381dbbfe | 8d0201aec5aec240e81687b9d65620d83f57b613 | refs/heads/master | 2021-06-17T21:17:21.337535 | 2017-06-09T08:50:52 | 2017-06-09T08:50:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # test = {
# 'one':'oneoneone',
# 'two':'twotwotwo',
# 'three':'three'
# }
#
# for item in test.items():
# print(item)
# del_extension = {
# '.tmp': '临时文件',
# '._mp': '临时文件_mp',
# '.log': '日志文件',
# '.gid': '临时帮助文件',
# '.chk': '磁盘检查文件',
# '.old': '临时备份文件',
# '.xlk': 'Excel备份文件',
# '.bak': '临时备份文件bak'
# }
#
# delInfo = {}
# for k,v in del_extension.items():
# # dict构建一个name count结构的字典值
# delInfo[k] = dict(name = v, count = 0)
#
# print(delInfo)
#
# import json
#
# a = {'one':1000,'two':2000}
# print(a)
# b = json.dumps(a)
# print(b)
| UTF-8 | Python | false | false | 672 | py | 4 | otherMsg.py | 3 | 0.501748 | 0.486014 | 0 | 34 | 15.794118 | 44 |
yekeren/Cap2Det | 7,447,473,314,598 | ce7d149e5974ea343b4c9061991e11e03f683843 | 7e7ae13002510ff4e0f23c462236c41b920ce6fe | /models/registry.py | cd4a18d92b374d255d07bf47be2163d121c76e4f | [
"Apache-2.0"
]
| permissive | https://github.com/yekeren/Cap2Det | e9f1d855cd663b5170742f2183e4b42d9a60f702 | 727b3025f666e2053b3bbf94cf18f9ab56fb1599 | refs/heads/cap2det | 2022-11-25T05:35:35.204237 | 2021-04-19T21:59:20 | 2021-04-19T21:59:20 | 199,341,640 | 34 | 11 | Apache-2.0 | false | 2022-11-22T02:57:30 | 2019-07-28T22:06:18 | 2022-11-21T08:10:28 | 2022-11-22T02:57:30 | 19,175 | 30 | 10 | 12 | Python | false | false | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_registry = {}
def register_model_class(cid, cls):
"""Registers a model class.
Args:
    cid: Class id.
cls: A specific class.
"""
global _registry
_registry[cid] = cls
tf.logging.info('Function registered: %i', cid)
def get_registered_model_classes():
"""Returns the dict mapping class ids to classes.
Returns:
registry: A python dict.
"""
return _registry
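# Illustrative usage (hypothetical id and class, for documentation only):
#   class MyModel(object): pass
#   register_model_class(1, MyModel)
#   model_cls = get_registered_model_classes()[1]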
| UTF-8 | Python | false | false | 529 | py | 63 | registry.py | 48 | 0.676749 | 0.676749 | 0 | 30 | 16.633333 | 51 |
jpaav/SnakeGame | 16,819,091,946,823 | 810a53dbde129bc138b2deef392090940058f0b4 | 831c8904b18cc993d16e99ba04a6f78c4b5ce21e | /logic/game.py | 7da6278148e510134dc3886b312fee3119322a7a | []
| no_license | https://github.com/jpaav/SnakeGame | d47b48540ff58f7ff6c38c75699f3a5409898de9 | 0d28c53a5c0a53a3bdea7ae22055f97c5cb97864 | refs/heads/master | 2020-04-10T07:07:18.231442 | 2019-01-24T13:08:51 | 2019-01-24T13:08:51 | 160,872,834 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
import numpy as np
import pygame as pg
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from uuid import getnode
from classes.board import Board
from classes.snake import Snake
from logic.difficulties import Difficulties
from logic.states import States
class SnakeGame:
should_quit = False
height = 592
width = 592
state = States(0)
screen = pg.display.set_mode((width, height))
font = None
info_font = None
board = Board((width, height))
snake = Snake()
difficulty = Difficulties(0)
apple_spawn_rate = 2000 # milliseconds between apple spawns
apple_spawn_amount = 1 # number of apples to spawn each time
snake_speed = 500 # milliseconds between moves for snake
SPAWN_APPLES = pg.USEREVENT+1
MOVE = pg.USEREVENT+2
start_time = 0
score = 0
should_upload_scores = False
image_title = None
def start(self):
# Init PyGame
(passed, failed) = pg.init()
print("Number of modules successfully loaded: " + str(passed))
print("Number of modules failed to load: " + str(failed))
# Close program if any modules fail to load
if failed > 0:
return 1
# Create screen with given dimensions
pg.display.set_mode((self.width, self.height))
pg.display.set_caption("Stat-erpillar")
# Set up fonts
self.font = pg.font.SysFont('Comic Sans MS', 30)
self.info_font = pg.font.SysFont('Arial', 18)
# Load images
self.image_title = pg.image.load('resources/titlescreen.png')
self.image_title = pg.transform.scale(self.image_title, (self.width, self.height))
# Init game logic
self.state = States.TITLE
# Set event timers
pg.time.set_timer(self.SPAWN_APPLES, self.apple_spawn_rate)
pg.time.set_timer(self.MOVE, self.snake_speed)
# Loop until should_quit is changed to true
while not self.should_quit:
self.loop()
return 0
def loop(self):
self.check_events()
if self.state == States.GAME:
if self.snake.alive:
self.score = self.board.update_board(self.snake, self.score)
else:
# Upload score
if self.should_upload_scores:
self.upload_score()
# Go to dead state
self.state = States.DEAD
self.draw()
pg.display.update()
def check_events(self):
for event in pg.event.get():
if event.type == pg.QUIT:
self.should_quit = True
# Only spawn apples if event has been sent by timer and the player is in the game
if event.type == self.SPAWN_APPLES and self.state == States.GAME:
self.board.spawn_apples(self.apple_spawn_amount)
# Only move at correct time intervals
if event.type == self.MOVE:
self.snake.move()
# Check keypress
if event.type == pg.KEYDOWN:
# Quit on escape
if event.key == pg.K_ESCAPE:
self.should_quit = True
# Handle button input
if event.key == pg.K_SPACE:
if self.state == States.GAME:
self.snake.turn()
elif self.state == States.TITLE:
self.begin_game()
elif self.state == States.DEAD:
self.state = States.TITLE
# Draw functions
def draw(self):
if self.state == States.TITLE:
self.draw_title()
elif self.state == States.GAME:
self.draw_game()
elif self.state == States.SCORE:
self.draw_score()
elif self.state == States.DEAD:
self.draw_dead()
def draw_title(self):
# TODO: Replace this with a picture logo Ethan made
self.screen.fill([0, 200, 0])
# title_text = self.font.render('Stat-erpillar', True, (255, 255, 255))
# self.screen.blit(title_text, ((self.width/2) - title_text.get_rect().width/2, 10))
self.screen.blit(self.image_title, (self.width/2 - self.image_title.get_rect().width / 2, 0))
def draw_game(self):
self.screen.fill([0, 200, 0])
self.board.draw(self.screen)
self.snake.draw(self.screen)
# Draw info box
info_box = pg.Surface((200, 100))
info_box.set_alpha(200)
info_box.fill([255, 255, 255])
self.screen.blit(info_box, (0, 0))
# Draw info text
time_text = self.info_font.render("Elapsed Time: " + str((pg.time.get_ticks()-self.start_time)/1000), True, (0, 0, 0))
score_text = self.info_font.render("Score: " + str(self.score), True, (0, 0, 0))
self.screen.blit(time_text, (5, 5))
self.screen.blit(score_text, (5, 25))
def draw_score(self):
pass
def draw_dead(self):
white_box = pg.Rect(self.board.get_center()[0]-100, self.board.get_center()[1]-50, 200, 100)
pg.draw.rect(self.screen, (255, 255, 255), white_box)
title_text = self.font.render('You Died', True, (0, 0, 0))
self.screen.blit(title_text, ((self.width / 2) - title_text.get_rect().width / 2, (self.height / 2) - title_text.get_rect().height / 2))
def set_difficulty(self, difficulty):
if difficulty == Difficulties.EASY:
self.difficulty = difficulty.EASY
self.apple_spawn_rate = 2000
self.apple_spawn_amount = 1
if difficulty == Difficulties.MEDIUM:
self.difficulty = difficulty.MEDIUM
self.apple_spawn_rate = 1000
self.apple_spawn_amount = 2
if difficulty == Difficulties.HARD:
self.difficulty = difficulty.HARD
self.apple_spawn_rate = 500
self.apple_spawn_amount = 3
def begin_game(self):
self.start_time = pg.time.get_ticks()
self.snake = Snake(4, draw_simple=True)
self.snake.set_snake_pos(self.board.get_center_tile(), np.array([self.board.tile_size(), 0]), self.board.tile_size())
self.board.clear()
self.score = 0
self.state = States.GAME
def upload_score(self):
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
# Call the Sheets API
sheet = service.spreadsheets()
dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
new_entry = [
[
dt, self.score, getnode(), pg.time.get_ticks() - self.start_time
]
]
body = {
'values': new_entry
}
result = sheet.values().append(
spreadsheetId='1vxIHPnCOS1Uv42N-aKQwkB3daFM-0W23-A_It9vYEPw', range='Uploaded_Data!A1:D1',
valueInputOption="USER_ENTERED", body=body).execute()
print('{0} cells appended .'.format(result.get('updates').get('updatedCells')))
| UTF-8 | Python | false | false | 6,204 | py | 9 | game.py | 7 | 0.68343 | 0.659413 | 0 | 190 | 31.647368 | 138 |
LYQCOOL/Vueshops | 12,292,196,402,725 | e85d8d9f0f54291c24b775881a34e21b905cd7e1 | 9a1e39e525bb1eabe71ea4ea34e58f19f422d88f | /apps/users/adminx.py | cbb061fd901cd33d7289102a513e23e238b5930a | []
| no_license | https://github.com/LYQCOOL/Vueshops | 6976ef6dde730eb50116315ebaf936ad2f690edb | 3ccc9bd50653ee38d59a5b8c4f4fa19d19f0b874 | refs/heads/master | 2020-03-28T05:04:21.961426 | 2018-09-07T01:37:32 | 2018-09-07T01:37:32 | 147,754,305 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # _*_ encoding:utf-8 _*_
__author__ = 'LYQ'
__data__ = '2018/8/17 19:37'
import xadmin
from xadmin import views
from .models import *
class BaseSetting(object):
enable_themes = True
use_bootswatch = True
class GlobalSetting(object):
site_title='购物网后台管理'
site_footer='七七大公司'
menu_style='accordion'
class VerifyCodeAdmin(object):
list_display=['code','mobile','add_time']
search_fields=['code','mobile']
list_filter=['code','mobile']
xadmin.site.register(VerifyCode,VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting)
xadmin.site.register(views.CommAdminView,GlobalSetting) | UTF-8 | Python | false | false | 655 | py | 23 | adminx.py | 20 | 0.709984 | 0.690967 | 0 | 28 | 21.571429 | 55 |
Thigos/PyChat-CLI | 5,927,054,880,124 | 37cb5bcc65ede8f7efaa9e8172e70bb308cec6a6 | 3bec2018661d470936e21d84f6a7dcfe8f751f9c | /bin/Database.py | 2cdaa6381df0931a679589d37488080414cf1c33 | [
"MIT"
]
| permissive | https://github.com/Thigos/PyChat-CLI | 7ca8f704157f4705c178b7379cb7c394114957d8 | 7e938ae2c1c88350f5a78c163bde441214f02761 | refs/heads/main | 2023-05-28T18:39:42.534993 | 2021-06-04T05:37:44 | 2021-06-04T05:37:44 | 366,591,786 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pyrebase
# Firebase configuration (keys must be string literals; replace the placeholder
# values with the project's real credentials)
firebaseConfig = {
    "apiKey": "api-key",
    "authDomain": "project-id.firebaseapp.com",
    "databaseURL": "https://project-id.firebaseio.com",
    "projectId": "project-id",
    "storageBucket": "project-id.appspot.com",
    "messagingSenderId": "sender-id",
    "appID": "app-id",
}
firebase = pyrebase.initialize_app(firebaseConfig)
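# Usage sketch (assumes real credentials above; pyrebase exposes the realtime
# database through firebase.database()):
#   db = firebase.database()
#   db.child("messages").push({"user": "alice", "text": "hi"})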
| UTF-8 | Python | false | false | 363 | py | 5 | Database.py | 4 | 0.722992 | 0.722992 | 0 | 15 | 23 | 51 |
jsoldani/ship-to | 15,582,141,359,856 | f438f3750a9bd0209903504efc12fc1c89e2b97b | 301d22ed52e78dc4b7dbdcf5124bf3f111f1744f | /loader/config/costs-template-getter.py | b767e5a5a5f23024bbb372c98a30bfa1d6f44357 | [
"Apache-2.0"
]
| permissive | https://github.com/jsoldani/ship-to | 2fa15ce9cadb4cb01e5e1290c67b0f00d5399d31 | 92ff47d4c44278778e9e75892ba90eb8e34f0181 | refs/heads/master | 2020-12-27T19:02:04.536486 | 2020-09-16T14:04:40 | 2020-09-16T14:04:40 | 238,014,296 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def cost(node):
return costs[node]
| UTF-8 | Python | false | false | 39 | py | 16 | costs-template-getter.py | 8 | 0.666667 | 0.666667 | 0 | 2 | 18.5 | 22 |
copenlu/politihop | 11,149,735,121,749 | ee6ab0447e5edb990fa8ef4f9e0810cd27e6a727 | 268eb04895b6beb8fd656204ed7cd3cff9e6c2b8 | /Transformer-XH/data/__init__.py | ffeeb2dc27ceb5d2723f3662bca6952466b8e435 | [
"MIT"
]
| permissive | https://github.com/copenlu/politihop | 95feae4d5e0816363b10f89f721bb141bfe1d549 | bcb16fd6ff983337d5aae47494477be623a015d8 | refs/heads/master | 2023-06-16T00:13:24.566764 | 2021-07-15T07:47:37 | 2021-07-15T07:47:37 | 297,984,085 | 8 | 1 | null | false | 2020-10-16T13:32:32 | 2020-09-23T13:46:47 | 2020-09-23T16:14:14 | 2020-10-16T13:32:31 | 1,436 | 0 | 1 | 0 | null | false | false | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# import the classes in the files of the data folder here.
from .base import TransformerXHDataset
from .hotpotqa import HotpotDataset, batcher_hotpot
from .fever import FEVERDataset,batcher_fever
from .utils import load_data | UTF-8 | Python | false | false | 297 | py | 38 | __init__.py | 18 | 0.811448 | 0.811448 | 0 | 7 | 41.571429 | 58 |
Ddollz/Writeam | 8,186,207,710,608 | 521ff22c374fee08ff114cca64318ad863dccf2f | 20670b9904ddfdd9f6f42b734734319249c37171 | /clientApp/migrations/0013_auto_20210910_1744.py | a979b02ab88b01afdb0e59cfe7aafb44bd38999c | []
| no_license | https://github.com/Ddollz/Writeam | beced57abdba5b436d9e927dba9c30c2ff817f80 | 8b3c8916b812aa11d7b096eeb009292be6707048 | refs/heads/main | 2023-08-31T10:09:00.376411 | 2021-10-07T08:42:18 | 2021-10-07T08:42:18 | 391,028,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.6 on 2021-09-10 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clientApp', '0012_auto_20210907_0745'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': 'Article'},
),
migrations.AlterModelOptions(
name='education',
            options={'verbose_name': 'Education'},
),
migrations.AlterModelOptions(
name='employmenthistory',
options={'verbose_name': 'Employment History'},
),
migrations.AlterModelOptions(
name='link',
options={'verbose_name': 'Social Link'},
),
migrations.AlterModelOptions(
name='personaldetails',
options={'verbose_name': 'Personal Details'},
),
migrations.AlterModelOptions(
name='reference',
options={'verbose_name': 'Reference'},
),
migrations.AlterModelOptions(
name='skill',
options={'verbose_name': 'Skill'},
),
migrations.AlterField(
model_name='personaldetails',
name='fname',
field=models.CharField(max_length=200, null=True, verbose_name='First Name'),
),
migrations.AlterField(
model_name='personaldetails',
name='lname',
field=models.CharField(max_length=200, null=True, verbose_name='Last Name'),
),
]
| UTF-8 | Python | false | false | 1,558 | py | 131 | 0013_auto_20210910_1744.py | 70 | 0.550064 | 0.526316 | 0 | 51 | 29.54902 | 89 |
Penultimatum/discord-boar-bot | 8,864,812,546,441 | fc30beabc431a109019cba0c8770ba7918825577 | 61c13147d43c6bf848f3cf2bf7bd575f580880ab | /src/boarbot/common/botmodule.py | 7aa1ed94685556ebf4931341f346f2e828bb6dcf | [
"MIT"
]
| permissive | https://github.com/Penultimatum/discord-boar-bot | bb4ad8365ac6d85ebed7babd8473ba83b3052d9a | 1222b4e1df2513928a5ec20fd5d292d6e15f6c2c | refs/heads/master | 2021-01-02T09:10:02.720805 | 2017-06-23T20:53:04 | 2017-06-23T20:53:04 | 99,149,047 | 0 | 0 | null | true | 2017-08-02T18:31:12 | 2017-08-02T18:31:12 | 2017-05-26T18:16:33 | 2017-06-23T20:53:08 | 49 | 0 | 0 | 0 | null | null | null | import discord
import shlex
from abc import ABCMeta, abstractmethod
from boarbot.common.events import EventType
from boarbot.common.log import LOGGER
class BotModule(metaclass=ABCMeta):
def __init__(self, client: discord.Client):
self.client = client
@abstractmethod
async def handle_event(self, event_type: EventType, args):
...
'''
Parse a command in the format `@<bot> <command> <command arguments>`.
Returns either a list of string arguments, or None if this is not a valid
call to the given command.
'''
def parse_command(self, command: str, message: discord.Message, ignore_bots=True) -> [str]:
if ignore_bots and message.author.bot:
return None
content = message.content.strip() # type: str
mention = self.client.user.mention # type: str
#LOGGER.debug('Parsing content `{}` for command `{}` and user {}'.format(message.content.strip(), command, mention))
if not content.startswith(mention):
return None
content = content[len(mention):].strip() # type: str
try:
parts = shlex.split(content) # type: [str]
except Exception as e:
LOGGER.debug('Failed shlex.split on ' + str(content))
return None
if not parts or parts[0] != command:
return None
return parts[1:]
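    # Illustrative example: if the bot's mention renders as "@BoarBot", then for a
    # message "@BoarBot sounds list", parse_command('sounds', message) returns ['list'];
    # it returns None when the message does not start with the mention or names another command.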
def load_opus(self):
if not discord.opus.is_loaded():
from ctypes.util import find_library
LOGGER.debug('libopus not automatically loaded')
opus_attempts = [
find_library('opus'),
'libopus.so.0',
'opus.dll',
'opus',
]
for opus in opus_attempts:
if not opus:
continue
try:
LOGGER.debug('Loading Opus codec from ' + opus)
discord.opus.load_opus(opus)
LOGGER.debug('Success!')
break
except OSError as e:
LOGGER.debug('Failed! ' + str(e))
else:
LOGGER.error('Could not load Opus codec! Voice support is not available.')
return discord.opus.is_loaded()
| UTF-8 | Python | false | false | 2,269 | py | 20 | botmodule.py | 16 | 0.561481 | 0.560159 | 0 | 67 | 32.865672 | 124 |
kimballXD/Example | 19,524,921,347,323 | f4d77421742a5b2a7730a2d1b39b094b4cf97978 | df50b358dfc48bfea3ea398adcdc99dad7b32f27 | /cralwer example/metal.py | 80f4ce5d68d46b5712ecac56ab99949666ada521 | []
| no_license | https://github.com/kimballXD/Example | ad027bd36f667718248bb734629ae3ea6f631272 | e4b6b70cf2f3258b390550ee0b000e6e7cf168ee | refs/heads/master | 2021-01-10T08:21:52.017877 | 2016-03-27T17:14:43 | 2016-03-27T17:14:43 | 54,837,836 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 09:59:16 2016
Crawling the member list of Taiwan Metal Industry Association
(http://www.trmsa.org.tw/Member.aspx)
Using Selenium package to request page. Parsing page with Selenium and BeautifulSoup package.
Python version: built from Anaconda 2.
@author: Wu
"""
def main(filePath, test, retryPath):
#%%
from bs4 import BeautifulSoup as bs
import pandas as pd
from selenium import webdriver
import time
import traceback
import pickle
#%%
saved={'dataList':None, 'index':None, 'load':False} if retryPath==None else pickle.load(open(retryPath,'rb'))
if saved['load']:
dataList=saved['dataList']
start=saved['index']-1
print 'start from {}'.format(start)
else:
dataList=[]; start=0
try:
fileCon=open(filePath,mode='w')
driver = webdriver.Firefox()
driver.implicitly_wait(10)
driver.get('http://www.trmsa.org.tw/Member.aspx')
submit=driver.find_element_by_id('ctl00_ContentPlaceHolder1_queryImageButton')
submit.click()
#%%
enName=driver.find_elements_by_css_selector('.catalog1 a:nth-of-type(2)')
enName.extend(driver.find_elements_by_css_selector('.catalog2 a:nth-of-type(2)'))
enName=[x.text for x in enName]
#test=10
pageN=len(enName) if test==0 else test
for index in range(start,pageN):
            # has to 're-find' elements whenever the page has changed in any way, even just a reload
entries=driver.find_elements_by_css_selector('.catalog1 a:nth-of-type(1)')
entries.extend(driver.find_elements_by_css_selector('.catalog2 a:nth-of-type(1)'))
entry=entries[index]
entry.click()
page=driver.page_source
driver.back()
soup=bs(page,'lxml')
dataDict={'name':soup.select('#ctl00_ContentPlaceHolder1_nameLabel')[0].text,
'registerNo':soup.select('#ctl00_ContentPlaceHolder1_uidLabel')[0].text,
'address':soup.select('#ctl00_ContentPlaceHolder1_addrLabel')[0].text,
'since':soup.select('#ctl00_ContentPlaceHolder1_setDateLabel')[0].text,
'capital':soup.select('#ctl00_ContentPlaceHolder1_capitalLabel')[0].text,
'employee':soup.select('#ctl00_ContentPlaceHolder1_employeeLabel')[0].text,
'productType':soup.select('#ctl00_ContentPlaceHolder1_itemLabel')[0].text,
'product':soup.select('#ctl00_ContentPlaceHolder1_productLabel')[0].text,
'source':driver.current_url,
'enName':enName[index]}
dataList.append(dataDict)
time.sleep(1)
if index%10==0:
time.sleep(3)
except:
traceback.print_exc()
saved['dataList']=dataList
saved['index']=index
saved['load']=True
pickle.dump(saved,open('metalSaved.pkl','wb'))
return saved
#%%
data=pd.DataFrame(dataList)
data=data.drop_duplicates()
data['NGOname']=u'台灣區金屬品冶製工業同業公會'
data['NGOtype']=u'產業公會'
data['orgType']=u''
data['id']=u''
data=data.reindex_axis(['id','NGOname','NGOtype','name','orgType','address','employee','capital','since','registerNo','productType','product','source','enName'],1)
data.to_csv(fileCon,encoding='utf8',index=False,sep=';')
    fileCon.close()
    print 'job has done for metal Association'
    print 'file Destination '+filePath
    print ''
    return data
#%%
if __name__=='__main__':
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('filePath',nargs='?',default=os.getcwd())
parser.add_argument('test',nargs='?',type=int, default=0)
parser.add_argument('--retryPath',nargs='?')
args = parser.parse_args()
filePath=os.getcwd()+'\\'+args.filePath+'\\metal.csv' if ':' not in args.filePath else args.filePath
retryPath=os.getcwd()+'\\'+args.retryPath if ((args.retryPath!=None) and (':' not in args.retryPath)) else args.retryPath
data=main(filePath, args.test, retryPath) | UTF-8 | Python | false | false | 4,289 | py | 8 | metal.py | 4 | 0.607336 | 0.590172 | 0 | 107 | 38.757009 | 167 |
TurtleZhong/uav_frontier_exploration_3d | 18,665,927,897,994 | cc542398ed4eaf835fa43c7b0b448e9ac1713dce | 320551bcf39da6ce25754b20515b14daeb888f19 | /scripts/execute_trajectory_state_machine.py | 2c5af99b30af540266036d6d108a376bde268afe | []
| no_license | https://github.com/TurtleZhong/uav_frontier_exploration_3d | f74a6f8e34b1542e4765d4aeb04ee7b702a29fb9 | 16419e19fdebea2bb45c9f947592a28c42b7f2ad | refs/heads/master | 2023-03-24T17:48:07.960403 | 2021-03-25T14:33:43 | 2021-03-25T14:33:43 | 359,473,829 | 1 | 2 | null | true | 2021-04-19T13:41:39 | 2021-04-19T13:41:38 | 2021-03-25T14:36:21 | 2021-03-25T14:36:18 | 69,286 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
__author__ = 'abatinovic'
import rospy, math, time
from geometry_msgs.msg import Pose, PoseStamped
from std_msgs.msg import Float64, Empty, Int32, Float32, String, Bool
from nav_msgs.msg import Odometry
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from trajectory_msgs.msg import MultiDOFJointTrajectoryPoint, \
MultiDOFJointTrajectory, JointTrajectory, JointTrajectoryPoint
from larics_motion_planning.srv import MultiDofTrajectory, \
MultiDofTrajectoryRequest, MultiDofTrajectoryResponse
import copy
class UavExplorationSm:
def __init__(self):
self.state = "start"
self.state_previous = "none"
self.target_pose = PoseStamped()
self.current_pose = Pose()
self.current_reference = MultiDOFJointTrajectoryPoint()
self.first_measurement_received = False
self.executing_trajectory = 0
self.start_time = time.time()
self.execution_start = time.time()
self.confirmed = False
self.service_called = False
# Initialize ROS params
# Distance of the UAV from the target pose at which we consider the
# trajectory to be executed
self.r_trajectory = rospy.get_param('radius_trajectory_executed', 0.5)
# Rate of the state machine.
self.rate = rospy.get_param('rate', 10)
        # How long we collect feedback and store it in the array. The execution checks
        # rely on this parameter because they look through the last x seconds of
        # feedback to determine the next state.
self.feedback_collection_time = rospy.get_param('feedback_collection_time', 1.0)
self.dt = 1.0/float(self.rate)
self.n_feedback_points = int(self.rate*self.feedback_collection_time)
# Set up array of feedback poses
self.feedback_array = []
self.feedback_array_index = 0
for i in range(self.n_feedback_points):
temp_pose = Pose()
self.feedback_array.append(copy.deepcopy(temp_pose))
# Initialize publishers
self.state_pub = rospy.Publisher('exploration/current_state', String,
queue_size=1, latch=True)
self.trajectory_pub = rospy.Publisher('joint_trajectory', JointTrajectory,
queue_size=1)
self.point_reached_pub = rospy.Publisher('exploration/point_reached', Bool,
queue_size=1)
# Initialize services
print "Waiting for service multi_dof_trajectory."
rospy.wait_for_service('multi_dof_trajectory', timeout=30)
self.plan_trajectory_service = rospy.ServiceProxy(
"multi_dof_trajectory", MultiDofTrajectory)
rospy.Service('confirm_trajectory', SetBool, self.confirm_trajectory)
# Initialize subscribers
rospy.Subscriber('exploration/goal', PoseStamped,
self.targetPointCallback, queue_size=1)
rospy.Subscriber('carrot/trajectory', MultiDOFJointTrajectoryPoint,
self.referenceCallback, queue_size=1)
rospy.Subscriber('uav/cartographer/odometry_filtered_acc', Odometry,
self.globalPositionCallback, queue_size=1)
rospy.Subscriber('executing_trajectory', Int32,
self.executingTrajectoryCallback, queue_size=1)
time.sleep(0.2)
def run(self):
rate = rospy.Rate(self.rate)
self.state_pub.publish(self.state)
while not rospy.is_shutdown() and not self.first_measurement_received:
print ("Waiting for the first pose...")
time.sleep(1)
print ("The first pose received. Starting exploration state machine.")
while not rospy.is_shutdown():
# Start state only waits for something to happen
if self.state == "start":
if self.state_previous != self.state:
self.printStates()
self.state_previous = "start"
self.state_pub.publish(self.state)
# Planning the obstacle free trajectory in the map
if self.state == "plan":
if self.state_previous != self.state:
self.printStates()
self.state_previous = "plan"
self.state_pub.publish(self.state)
else:
print("Planning again!")
print ("Calling service!")
# Call the obstacle free trajectory planning service
request = MultiDofTrajectoryRequest()
# Create start point from current position information
trajectory_point = JointTrajectoryPoint()
trajectory_point.positions = [self.current_reference.transforms[0].translation.x, \
self.current_reference.transforms[0].translation.y, \
self.current_reference.transforms[0].translation.z, \
self.quaternion2Yaw(self.current_reference.transforms[0].rotation)]
request.waypoints.points.append(copy.deepcopy(trajectory_point))
                # Create the goal point from the target position information
trajectory_point = JointTrajectoryPoint()
trajectory_point.positions = [self.target_pose.position.x, \
self.target_pose.position.y, self.target_pose.position.z, \
self.quaternion2Yaw(self.target_pose.orientation)]
request.waypoints.points.append(copy.deepcopy(trajectory_point))
# Set up flags
request.publish_path = False
request.publish_trajectory = False
request.plan_path = True
request.plan_trajectory = True
response = self.plan_trajectory_service.call(request)
while not self.service_called and not rospy.is_shutdown():
rospy.sleep(0.01)
self.service_called = False
# if trajectory is OK publish it
if self.confirmed:
# If we did not manage to obtain a successful plan then go to
# appropriate state.
if response.success == False:
print ("**********************************************")
print ("In state:", self.state)
print ("Path planning failed!")
print ("**********************************************")
print (" ")
self.state = ("end")
# If plan was successful then execute it.
else:
self.trajectory_pub.publish(response.trajectory)
self.state = "execute"
# if trajectory is not OK, point is reached and go to "start" state
else:
self.state = "end"
# While trajectory is executing we check if it is done
if self.state == "execute":
if self.state_previous != self.state:
self.printStates()
self.state_previous = "execute"
self.state_pub.publish(self.state)
self.execution_start = time.time()
while not rospy.is_shutdown():
# When trajectory is executed simply go to end state.
if self.checkTrajectoryExecuted() == True:
print ("**********************************************")
print ("In state:", self.state)
print ("Execution timeout factor triggered!")
print ("**********************************************")
print (" ")
self.state = "end"
break
# If we want to send another point anytime
if self.state == "plan":
break
rate.sleep()
# End state, publish that you reached the point
if self.state == "end":
if self.state_previous != self.state:
self.printStates()
self.state_previous = "end"
self.state_pub.publish(self.state)
self.point_reached_pub.publish(True)
time.sleep(0.05)
# TODO: publish
self.state = "start"
rate.sleep()
def printStates(self):
print ("----------------------------------------------------")
print ("State changed. Previous state:", self.state_previous)
print ("State changed. Current state:", self.state)
print ("----------------------------------------------------")
print (" ")
def targetPointCallback(self, msg):
self.target_pose = msg.pose
self.state = "plan"
print ("New goal accepted.")
def globalPositionCallback(self, msg):
self.current_pose = msg.pose.pose
self.first_measurement_received = True
# Collect array of data with rate of the loop. We can fill this list
# in a cyclic manner since we have info about first and last data point
# stored in feedback_array_index
if ((time.time()-self.start_time) > self.dt):
self.start_time = time.time()
self.feedback_array[self.feedback_array_index] = copy.deepcopy(self.current_pose)
self.feedback_array_index = self.feedback_array_index + 1
if self.feedback_array_index >= self.n_feedback_points:
self.feedback_array_index = 0
def referenceCallback(self, msg):
self.current_reference = msg
def executingTrajectoryCallback(self, msg):
self.executing_trajectory = msg.data
def quaternion2Yaw(self, quaternion):
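        # Extract yaw (rotation about the z axis) from the quaternion (w, x, y, z)
        # using the standard atan2 conversion.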
q0 = quaternion.w
q1 = quaternion.x
q2 = quaternion.y
q3 = quaternion.z
return math.atan2(2.0*(q0*q3 + q1*q2), 1.0-2.0*(q2*q2 + q3*q3))
def checkTrajectoryExecuted(self):
        # Here we check if the UAV has been within some radius of the target
        # point for some time. If so, we consider the trajectory to be executed.
for i in range(self.n_feedback_points):
dx = self.target_pose.position.x - self.feedback_array[i].position.x
dy = self.target_pose.position.y - self.feedback_array[i].position.y
dz = self.target_pose.position.z - self.feedback_array[i].position.z
delta = math.sqrt(dx*dx + dy*dy + dz*dz)
if delta > self.r_trajectory:
return False
if self.executing_trajectory == 0:
return True
else:
return False
def confirm_trajectory(self, req):
self.service_called = True
self.confirmed = req.data
if (req.data):
print("Trajectory OK!")
else:
print("Trajectory is NOT OK!")
return SetBoolResponse(True, "Service confirm_trajectory called")
if __name__ == '__main__':
rospy.init_node('execute_trajectory_state_machine')
exploration = UavExplorationSm()
exploration.run()
| UTF-8 | Python | false | false | 9,898 | py | 28 | execute_trajectory_state_machine.py | 9 | 0.637503 | 0.630936 | 0 | 263 | 36.634981 | 91 |
thydungeonsean/Rainbowmancer | 15,161,234,583,743 | da38488dc49b44179845c888b109dd6ecf91b2a3 | f797c5fc3243944855ff9304a678f9d89ff85f93 | /src/map_objects/player/ablities/ability_collection.py | 173ff2d7efee33fc0028cb379394a2927f37ac42 | []
| no_license | https://github.com/thydungeonsean/Rainbowmancer | 551e857a85b76489b1cee3feb0e40f7832919712 | b8395fd2c25b83c84239a1aa198a0d134d7c60be | refs/heads/master | 2021-01-25T06:57:05.968319 | 2017-07-17T12:00:07 | 2017-07-17T12:00:07 | 93,628,155 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ability import Ability
from bolt import Bolt
from block import Block
from bind import Bind
from imbue import Imbue
from invoke import Invoke
from ray import Ray
from shatter import Shatter
from summon import Summon
def new_block_ability(inventory):
return Block(inventory)
def new_bolt_ability(inventory):
return Bolt(inventory)
def new_bind_ability(inventory):
return Bind(inventory)
def new_imbue_ability(inventory):
return Imbue(inventory)
def new_invoke_ability(inventory):
return Invoke(inventory)
def new_ray_ability(inventory):
return Ray(inventory)
def new_shatter_ability(inventory):
return Shatter(inventory)
def new_summon_ability(inventory):
return Summon(inventory)
ability_dict = {
'bolt': new_bolt_ability,
'block': new_block_ability,
'bind': new_bind_ability,
'imbue': new_imbue_ability,
'invoke': new_invoke_ability,
'ray': new_ray_ability,
'summon': new_summon_ability,
'shatter': new_shatter_ability
}
def new_ability(ability_id, inventory):
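    # Look up a specialized constructor for this ability id and fall back to the
    # generic Ability class when none is registered.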
if ability_dict.get(ability_id) is not None:
return ability_dict[ability_id](inventory)
return Ability(ability_id, inventory)
| UTF-8 | Python | false | false | 1,202 | py | 66 | ability_collection.py | 66 | 0.717138 | 0.717138 | 0 | 69 | 16.42029 | 50 |
jessharri/Gravitate | 6,975,026,924,048 | e132745cfaee644d5615f216b304d440f5547406 | d34bfb34e1df3ae1e10d2dfbe2698601fc6b6ca5 | /gravitate/schemas/luggage.py | 6a223fcdce3cde69deaf88700c724eef41f45c97 | []
| no_license | https://github.com/jessharri/Gravitate | deeeae76bfdb1ac73fb031cc650f59e1e9c2d118 | d478cea5e391cda199134b5fc3245cf2a6b33b59 | refs/heads/master | 2023-04-05T00:34:23.838442 | 2019-06-23T09:47:00 | 2019-06-23T09:47:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # from marshmallow import fields
# from flask_marshmallow import Marshmallow
from flasgger import Schema, fields
# ma = Marshmallow()
class LuggageItemSchema(Schema):
luggage_type = fields.Str()
weight_in_lbs = fields.Number()
class LuggageCollectionSchema(Schema):
luggages = fields.Nested('LuggageItemSchema', many=True)
| UTF-8 | Python | false | false | 340 | py | 4 | luggage.py | 2 | 0.755882 | 0.755882 | 0 | 14 | 23.285714 | 60 |
nc-uw/ML-DeepLearning--Algos | 16,655,883,202,804 | 7ff5795b32746f987d8dc6b5a7e891183f5c8748 | 66586a969a732680d012428b2782184a0cf4c9f8 | /ML05 - Deep Learning/A. Tensorflow MNIST/tf_model.py | d8207350b648d34f91d60e00e2728fe41cfa70d8 | []
| no_license | https://github.com/nc-uw/ML-DeepLearning--Algos | 5dc95e5ff1baaa50d8b3e9d021e3afed8a56e6a8 | 4447f17bd7d874fe3efe558bb6ab496dfd90d7b5 | refs/heads/master | 2022-02-05T12:57:40.953670 | 2019-06-20T20:34:33 | 2019-06-20T20:34:33 | 117,786,396 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """This contains model functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import tensorflow as tf
class Config(object):
"""This is a wrapper for all configurable parameters for model.
Attributes:
batch_size: Integer for the batch size.
learning_rate: Float for the learning rate.
image_pixel_size: Integer for the flatten image size.
hidden1_size: Integer for the 1st hidden layer size.
hidden2_size: Integer for the 2nd hidden layer size.
num_class: Integer for the number of label classes.
max_iters: Integer for the number of training iterations.
model_dir: String for the output model dir.
"""
def __init__(self):
self.batch_size = 100
self.learning_rate = 1e-3
# Each image is 28x28.
self.image_pixel_size = 784
self.hidden1_size = 128
self.hidden2_size = 128
self.num_class = 10
self.max_iters = 400
self.model_dir = './model'
def placeholder_inputs_feedfoward(batch_size, feat_dim):
"""Creats the input placeholders for the feedfoward neural network.
Args:
batch_size: Integer for the batch size.
feat_dim: Integer for the feature dimension.
Returns:
image_placeholder: Image placeholder.
label_placeholder: Label placeholder.
"""
image_placeholder = tf.placeholder(tf.float32, shape=(None,feat_dim))
    label_placeholder = tf.placeholder(tf.int32, shape=None)
return image_placeholder, label_placeholder
def fill_feed_dict(data_set, batch_size, image_ph, label_ph):
"""Given the data for current step, fills both placeholders.
Args:
data_set: The DataSet object.
batch_size: Integer for the batch size.
image_ph: The image placeholder, from placeholder_inputs_feedfoward().
        label_ph: The label placeholder, from placeholder_inputs_feedfoward().
Returns:
feed_dict: The feed dictionary maps from placeholders to values.
"""
image_feed, label_feed = data_set.next_batch(batch_size)
feed_dict = {
image_ph: image_feed,
label_ph: label_feed,
}
return feed_dict
def feed_forward_net(images, config):
"""Creates a feedforward neuralnetwork.
Args:
images: Image placeholder.
config: The Config object contains model parameters.
Returns:
logits: Output tensor with logits.
"""
    #Creates the 1st fully-connected layer with ReLU activation.
with tf.variable_scope('hidden_layer_1'):
# Creates two variables:
# 1) hidden1_weights with size [image_pixel_size, hidden1_size].
# 2) hidden1_biases with size [hidden1_size].
#check for images variable
weights = tf.Variable(tf.truncated_normal([config.image_pixel_size, config.hidden1_size],
stddev=1.0 / math.sqrt(float(config.image_pixel_size))),name='weights')
biases = tf.Variable(tf.zeros([config.hidden1_size]),name='biases')
# Performs feedforward on images using the two variables defined above.
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
#dropout prob = 1
hidden1_dropout = tf.nn.dropout(hidden1, 1)
#hidden1_dropout=hidden1
reg_1=tf.nn.l2_loss(weights)
#reg_1=0
    #Creates the 2nd fully-connected layer with ReLU activation.
with tf.variable_scope('hidden_layer_2'):
# Creates two variables:
# 1) hidden2_weights with size [hidden1_size, hidden2_size].
# 2) hidden2_biases with size [hidden2_size].
weights = tf.Variable(tf.truncated_normal([config.hidden1_size, config.hidden2_size],
stddev=1.0 / math.sqrt(float(config.hidden1_size))),name='weights')
biases = tf.Variable(tf.zeros([config.hidden2_size]),name='biases')
# Performs feedforward on hidden1 using the two variables defined above.
hidden2 = tf.nn.relu(tf.matmul(hidden1_dropout, weights) + biases)
hidden2_dropout = tf.nn.dropout(hidden2, 1)
#hidden2_dropout=hidden2
reg_2=tf.nn.l2_loss(weights)
#reg_2=0
    #Creates the penultimate (logits) linear layer.
with tf.variable_scope('logits_layer'):
# Creates two variables:
# 1) logits_weights with size [config.hidden2_size, config.num_class].
# 2) logits_biases with size [config.num_class].
weights = tf.Variable(tf.truncated_normal([config.hidden2_size, config.num_class],
stddev=1.0 / math.sqrt(float(config.hidden2_size))),name='weights')
biases = tf.Variable(tf.zeros([config.num_class]),name='biases')
# Performs linear projection on hidden2 using the two variables above.
logits = tf.matmul(hidden2_dropout, weights) + biases
reg_3=tf.nn.l2_loss(weights)
#reg_3=0
return logits, reg_1+reg_2+reg_3
def compute_loss(logits, labels, reg):
"""Computes the cross entropy loss between logits and labels.
Args:
logits: A [batch_size, num_class] sized float tensor.
labels: A [batch_size] sized integer tensor.
reg: A [batch_size] sized float tensor.
Returns:
loss: Loss tensor.
"""
#Computes the cross-entropy loss.
#labels = tf.to_int64(labels)
#0.01= regularisation param
loss = 0.01 * reg + tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.to_int64(labels), logits=logits)
return loss
def evaluation(sess, image_ph, label_ph, data_set, eval_op):
"""Runs one full evaluation and computes accuracy.
Args:
sess: The session object.
image_ph: The image placeholder.
label_ph: The label placeholder.
data_set: The DataSet object.
eval_op: The evaluation accuracy op.
Returns:
accuracy: Float scalar for the prediction accuracy.
"""
##config=Config()
#Computes the accuracy.
feed_dict = fill_feed_dict(data_set, data_set.num_samples, image_ph, label_ph)
count=sess.run(eval_op, feed_dict=feed_dict)
accuracy=count/data_set.num_samples
print('Num samples: %d Num correct: %d Accuracy: %0.02f' %
(data_set.num_samples, count, accuracy))
return accuracy
| UTF-8 | Python | false | false | 6,424 | py | 14 | tf_model.py | 12 | 0.643836 | 0.626712 | 0 | 172 | 36.348837 | 122 |
Kutabix/matura | 4,415,226,393,040 | 833610fb3fcd0f5dc8003a4a5fbd984ebd06e8d6 | 6e23c34d439f8afdcfde8ec7012cb34f6958e7f6 | /63/zad1.py | 3deb3260fc4ea085a7752bfcece1f7bbd27362a4 | []
| no_license | https://github.com/Kutabix/matura | 53de8d3b086de1cd0fd271a9c0a9e804cc19e6e7 | 50655f2de70cad460bd7c6c3dcf333a159e81272 | refs/heads/master | 2022-04-19T07:24:55.565874 | 2020-04-21T00:12:48 | 2020-04-21T00:12:48 | 256,803,403 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from openfile import file
for binary in file():
if len(binary) % 2 == 0 and binary[:int(len(binary)/2)] == binary[int(len(binary)/2):]:
print(binary)
| UTF-8 | Python | false | false | 163 | py | 27 | zad1.py | 26 | 0.625767 | 0.601227 | 0 | 5 | 31.6 | 91 |
kitchWWW/sparse_music | 13,005,160,980,449 | 64ca686b326acd902595ed02fbd983b6d66ba355 | 2d0d0ef256f4cf882ab18233973e7d22c4f8c6b3 | /create.py | 0082bf75cfda4fa0e7a895716c008d7bd1643cbe | []
| no_license | https://github.com/kitchWWW/sparse_music | cf2f7e4f2cfa8efa573805bd278205ff8deaf1ef | 5a86bdef7e08e631375a9bc1870ad7c504f1d42d | refs/heads/master | 2020-03-27T18:45:06.448233 | 2018-08-31T20:50:24 | 2018-08-31T20:50:24 | 146,940,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import math
noteNames = ['c', 'cis', 'd', 'ees', 'e', 'f','fis','g','aes','a','bes','b']
def stringForNote(note,octave):
ret = noteNames[note%12]
direction = (math.floor(note/12) + octave)
if direction > 0:
ret += '\''*direction
elif direction < 0:
ret += ','*(-1*direction)
return ret
stepDirections = [[-1,2], # c -> b or d
[], # c#
[0, 4], # d -> c or e
[], # ees
[2, 5], # e
[4,7], # f
[4,7], # fis
[5,9], # g
[9], # aes
[7,11], # a
[9,12], # bes
[12,9], # b
[11,14], #c
[],
[12,16] #d
]
SPECIAL = [0]
NO_ACCENT = [1]
ONES_THAT_END = [2,3,4,5]
ONES_THAT_START = [6,7,8,9]
NORMAL = [10,11,12]
decorationText = [
'{0}1 s4 \n', #special, for last measure
's2 {0}4 s4 \n',#no accent note
's2 s4 {0}4 \\( \n', #ones that end
's2 {1}4 \\( {0}4 \n',
's8 s2 {1}8 \\( {0}4 \n',
's8 s2 {1}4 \\( {0}8 \n',
'{1}4 {0}4\\) s2 \n', #ones that start
'{0}4\\) s4 s2 \n',
'{1}4 {0}8\\) s2 s8 \n',
'{1}8 {0}4\\) s2 s8 \n',
's4. {1}4 \\({0}4\\) s8 \n', #normal stuff
's4. {1}4 \\({0}8\\) s4 \n',
's4. {1}8 \\({0}4\\) s4 \n',
]
# decorationText = [
# '{0}1 s4 \n', #special, for last measure
# 's2 {0}4 s4 \n',#no accent note
# 's2 s4 {0}4 \n', #ones that end
# 's2 {1}4 {0}4 \n',
# 's8 s2 {1}8 {0}4 \n',
# 's8 s2 {1}4 {0}8 \n',
# '{1}4 {0}4 s2 \n', #ones that start
# '{0}4 s4 s2 \n',
# '{1}4 {0}8 s2 s8 \n',
# '{1}8 {0}4 s2 s8 \n',
# 's4. {1}4 {0}4 s8 \n', #normal stuff
# 's4. {1}4 {0}8 s4 \n',
# 's4. {1}8 {0}4 s4 \n',
# ]
print("*** SPECIAL")
for i in SPECIAL:
print(decorationText[i])
print("*** NO_ACCENT")
for i in NO_ACCENT:
print(decorationText[i])
print("*** ONES THAT END")
for i in ONES_THAT_END:
print(decorationText[i])
print("*** ONES THAT START")
for i in ONES_THAT_START:
print(decorationText[i])
print("*** NORMAL")
for i in NORMAL:
print(decorationText[i])
decorationDirections = []
# notes = [[0,5,9],
# [2,5,7],
# [4,7,9],
# [5,0,9],
# [7,10,0],
# [9,4,2],
# [11,7,6],
# [12,0,0]
# ]
# notes = [
# [0, 0, 0],
# [2, 9, 0],
# [4, 7, 0],
# [5, 9, 0],
# [7,11, 5],
# [9, 2, 5],
# [11, 2, 5],
# [12,12,12]
# ]
notes = [[0,12],[9,2,5],[7],[9],[10,11],[14,2,0,4],[9,7],[14,2,12],[11,10],[8],[0]]
print("-----")
goodToGo = False
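# Rejection sampling: keep generating random note and decoration choices until the
# sequence satisfies all of the structural constraints checked at the bottom of this loop.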
while not goodToGo:
numbNotes = 0
parts = []
instLyString = []
orderOfDecor = []
for i in range(len(notes)):
notePlaying = random.choice(notes[i])
decorationIndex = random.choice(range(len(decorationText)))
# print(str(instIndex) +': '+ str(entryNumber)+' '+ str(notePlaying)+' '+str(decorationIndex))
if(i==0):
decorationIndex = random.choice(NORMAL)
if(i==3):
decorationIndex = random.choice(ONES_THAT_END)
if(i==4):
decorationIndex = random.choice(ONES_THAT_START)
if(i>0 and orderOfDecor[i-1] in ONES_THAT_END):
decorationIndex = random.choice(ONES_THAT_START)
if(i==len(notes)-1):
decorationIndex=0
lyNotePlaying = stringForNote(notePlaying, 1)
decorativeNote = random.choice(stepDirections[notePlaying])
lyAccentNote = stringForNote(decorativeNote,1)
lyString = decorationText[decorationIndex].format(lyNotePlaying,lyAccentNote )
instLyString.append(lyString)
orderOfDecor.append(decorationIndex)
if(decorationIndex<3 or decorationIndex==7):
numbNotes+=1
else:
numbNotes+=2
instLyString.insert(4,' ~ a\'4 ')
instLyString.insert(6,' \\break ')
parts.append(' '.join(instLyString))
goodToGo = True
numbBoundries=0
hasNoAccent = False
for i in range(len(notes)-1):
if(orderOfDecor[i] in NO_ACCENT):
hasNoAccent = True
if(orderOfDecor[i] in SPECIAL):
goodToGo=False
if(orderOfDecor[i] in ONES_THAT_END):
if(orderOfDecor[i+1] not in ONES_THAT_START):
goodToGo = False
else:
numbBoundries+=1
if(orderOfDecor[i] in ONES_THAT_START):
if(orderOfDecor[i-1] not in ONES_THAT_END):
goodToGo = False
if(orderOfDecor[i] in NO_ACCENT):
if(orderOfDecor[i+1] in NO_ACCENT):
goodToGo=False
if(not hasNoAccent):
goodToGo = False
if(orderOfDecor[9] in NO_ACCENT):
goodToGo=False
if(numbBoundries != 2):
goodToGo=False
if(orderOfDecor[3] not in ONES_THAT_END):
goodToGo=False
if(orderOfDecor[0] not in NORMAL):
goodToGo=False
# if(numbNotes!=19):
# goodToGo=False
print(numbNotes)
#insert the paren
import time
millis = int(round(time.time() * 1000))
timestamp = ''
fd = open('template.ly')
out = open('out/output_'+timestamp+'.ly','w')
for l in fd:
if '%part' in l:
partNo = int(l[5:])
out.write(parts[partNo])
elif '%time' in l:
out.write(timestamp)
else:
out.write(l)
fd.close()
out.close()
| UTF-8 | Python | false | false | 4,564 | py | 6 | create.py | 2 | 0.591586 | 0.524321 | 0 | 199 | 21.919598 | 96 |
ykshatroff/site_yksname | 14,293,651,200,995 | 8e0758853afc60edba1451e1e6e3e399c19f7ae4 | d0230c00360c0edbda54842497092c6b5dc6df19 | /yksname/templatetags/__init__.py | 5dc4b95a25723c46871e95316ca559bf6a6bf20f | []
| no_license | https://github.com/ykshatroff/site_yksname | 3a98ae58081ca12db0cfdcef1baa4de33a6ae37d | f328ddef2643cdf1b2d20410ec6c29437bbcaa6e | refs/heads/master | 2016-08-08T19:28:49.102584 | 2015-12-26T12:29:11 | 2015-12-26T12:29:11 | 48,335,817 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Date: 19.12.15
from __future__ import unicode_literals
| UTF-8 | Python | false | false | 83 | py | 33 | __init__.py | 20 | 0.614458 | 0.53012 | 0 | 3 | 26 | 39 |
earl-grey-cucumber/Algorithm | 12,867,722,049,610 | 49c0a3762a452f1230b9defc3cc28e2e57b32d2e | 2a401f70b0b4beb1c96b12f84dbcb93e74357dd7 | /166-Fraction-to-Recurring-Decimal/solution.py | 4ec4b02e17d3ee2747becb122d818a55333e778a | []
| no_license | https://github.com/earl-grey-cucumber/Algorithm | 1321aa94e74afdc84542944cd523b3dacfcdb43f | ae4611f181fe32e6b81a04d6661037f06c0e2008 | refs/heads/master | 2021-06-05T12:16:16.708485 | 2016-10-11T06:21:00 | 2016-10-11T06:21:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def fractionToDecimal(self, numerator, denominator):
"""
:type numerator: int
:type denominator: int
:rtype: str
"""
if numerator == 0 or denominator == 0:
return "0"
sign = (numerator > 0 and denominator > 0) or (numerator < 0 and denominator < 0)
result = ""
if not sign:
result += "-"
up, down = abs(numerator), abs(denominator)
remain = up % down
result += str(up / down)
if remain == 0:
return result
result += "."
mapping = {}
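        # mapping records the index in the result string at which each remainder
        # first appears, so a repeated remainder marks where the recurring part starts.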
i = len(result) # not i=2, result may be positive or negative, also may >= 10
while remain != 0 and remain not in mapping:
mapping[remain] = i
result += str(remain * 10 / down)
remain = (remain * 10) % down
i += 1
if remain == 0:
return result
result = result[0:mapping[remain]] + "(" + result[mapping[remain]:] # not i
result += ")"
return result
| UTF-8 | Python | false | false | 1,086 | py | 171 | solution.py | 171 | 0.494475 | 0.47698 | 0 | 31 | 34.032258 | 90 |
DenisYavetsky/UP | 6,425,271,089,284 | a0d27841ef7076be02508efe3493cee758d1b038 | d24b739e8b93331628bcbd1030e4be353ab2b9c1 | /up_app/views.py | 898f51413f8658e80439b00cc2c70cfb69b7962c | []
| no_license | https://github.com/DenisYavetsky/UP | 083e5c3e5e56a22bc453cffa60b86aa44bdb0781 | 29eaaa942546bbd861bd6dd1c749bebc1e1ac987 | refs/heads/master | 2023-07-13T14:17:50.987005 | 2021-08-31T18:44:25 | 2021-08-31T18:44:25 | 401,103,412 | 0 | 0 | null | false | 2021-08-31T18:44:26 | 2021-08-29T17:36:32 | 2021-08-29T17:43:53 | 2021-08-31T18:44:26 | 2,514 | 0 | 0 | 0 | JavaScript | false | false | from django.shortcuts import render
from .models import *
from bs4 import BeautifulSoup as bs
import requests
import urllib.request
from django.http import HttpResponse
from django.urls import reverse
from django.core import serializers
from .forms import OrderForm
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.db.models import Avg, Max, Min
def product_list(request):
data = Product.objects.all()
collection = Collection.objects.all()
return render(request, 'up_app/main.html', context={'data': data, 'collection': collection})
def cart_delete(request, pk):
cart = get_object_or_404(Cart, pk=pk)
if cart:
cart.delete()
return redirect('cart')
def plan(request):
from up_app.custom_script import get_dates_of_week
days = get_dates_of_week(0)
orders = Order.objects.all()
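    # For every order that has an extradition (handover) date, find the matching
    # day of the current week and attach the order number and its cart items to
    # that day's 'extraditions' list.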
for o in orders:
if o.extradition_date:
a = o.extradition_date.strftime('%B %d')
for i in range(len(days)):
ext = {
'n_order': '',
'products': [
]
}
carts = Cart.objects.all().filter(order=o)
for cart in carts:
name = cart.product.name
count = cart.count
pr = {'name': '',
'count': ''}
if days[i]['month'] + ' ' + days[i]['day'] == a:
if ext['n_order'] != str(o.number):
ext['n_order'] = str(o.number)
pr['name'] = name
pr['count'] = count
ext['products'].append(pr)
days[i]['extraditions'].append(ext) # = 'Выдача: Заказ №' + str(o.number)
else:
pr['name'] = name
pr['count'] = count
days[i]['extraditions'][0]['products'].append(pr)
print(days)
return render(request, 'up_app/plan.html', context={'days': days})
def cart(request):
total = 0
count = 0
    # When an order is created, the carts have to be assigned the order id
    # If a cart has a non-null status, it is considered closed and is not shown to the user
form = OrderForm()
    # Carts
carts = Cart.objects.filter(session_key=request.session.session_key, order__number__isnull=True)
print(request.session.session_key)
if request.method == 'POST':
form = OrderForm(request.POST)
number = Order.objects.all().aggregate(Max('number'))
if form.is_valid():
            # status - "In processing"
status = get_object_or_404(OrderStatus, pk=1)
Ord = form.save(commit=False)
Ord.status = status
            # number for the new order
if str(number['number__max']) == 'None':
Ord.number = 1
else:
Ord.number = number['number__max'] + 1
# Ord.number = 1
Ord.save()
for cart in carts:
cart.order = Ord
cart.save()
return redirect('catalog')
for d in carts:
total = total + (d.count * d.cost)
count = count + d.count
return render(request, 'up_app/cart.html', context={'data': carts, 'total': total, 'form': form, 'count': count})
def filter123(request):
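    # Collect collection ids from the GET parameters whose value is 'check'
    # (the id is taken from the first character of the parameter name) and
    # return the products of those collections serialized as JSON.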
session_key = request.session.session_key
if len(request.GET) > 0:
val = []
checkbox = request.GET
for c in checkbox:
if checkbox[c] == 'check':
val.append(int(c[0]))
if len(val) > 0:
data = Product.objects.filter(collection__in=val)
else:
data = Product.objects.all()
else:
data = Product.objects.all()
# collection = Collection.objects.all()
cart = Cart.objects.all()
# count = cart.filter(session_key=session_key).count()
data = serializers.serialize('json', data)
# return render(request, 'up_app/index.html', context={'data': data, 'count': count, 'collection': collection})
return HttpResponse(data, content_type='application/json')
def catalog(request):
session_key = request.session.session_key
if not session_key:
request.session.cycle_key()
data = Product.objects.all()
collection = Collection.objects.all()
cart = Cart.objects.all()
count = cart.filter(session_key=session_key, order__number__isnull=True).count()
return render(request, 'up_app/index.html', context={'data': data, 'count': count, 'collection': collection})
def product_change(request):
cart = get_object_or_404(Cart, pk=request.POST['cartId'])
cart.count = request.POST['count']
cart.save()
return redirect('cart')
def product_add(request):
collection = Collection.objects.all()
data = Product.objects.all()
    # check by session whether such carts already exist
    # look for the product with this id in those carts
    # if it is there, change the quantity
    # if not, create a new cart
cart = Cart.objects.all()
tmp = cart.filter(session_key=request.session.session_key, order__number__isnull=True)
if tmp.count() >= 1:
tmp2 = tmp.filter(product=request.POST['product'])
if tmp2.count() > 0:
            # update the record by adding to count
c = tmp2.get(product=request.POST['product'])
c.count += int(request.POST['count'])
c.cost = data.get(pk=request.POST['product']).price
c.save()
else:
            # if nothing was found, create a new cart
c = Cart()
c.session_key = request.session.session_key
c.count = int(request.POST['count'])
c.cost = data.get(pk=request.POST['product']).price
c.product = Product.objects.get(pk=request.POST['product'])
c.save()
else:
        # if there are no carts
c = Cart()
c.session_key = request.session.session_key
c.count = int(request.POST['count'])
c.cost = data.get(pk=request.POST['product']).price
c.product = Product.objects.get(pk=request.POST['product'])
c.save()
cart = Cart.objects.all()
return render(request, 'up_app/index.html', context={'data': data, 'cart': cart, 'collection': collection})
def parse(request):
header = {'accept': '*/*',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(HTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
    # URL to parse
url = 'https://www.livemaster.ru/ulicapryanikov'
request = requests.get(url, headers=header)
if request.status_code == 200:
soup = bs(request.content, 'html.parser')
cards = soup.find_all('div', {'class': 'col-xs-6 col-sm-3 col-md-3'})
        # create a new product
for card in cards:
p = Product()
title = card.find('div', {'class': 'item-preview__info-container'}).find_all(['span'])
p.name = title[0].text
print(title[1].text)
a = (title[1].text).split()
b = ''.join(a)
p.price = b[:-3]
sss = str(card.find_all(['img'])[0])
path = sss[sss.find('http'):sss.find('jpg') + 3]
# print('media/' + (title[0].text).replace('"', '') + '.jpg')
out = open(
'C:/Users/User/PycharmProjects/ulicaPryanikov/media/' + (title[0].text).replace('"', '') + '.jpg',
'wb')
p.picture = (title[0].text).replace('"', '') + '.jpg'
p.description = '123'
p.save()
resource = urllib.request.urlopen(path)
out.write(resource.read())
out.close()
def get_delivery(request, pk):
if request.is_ajax():
delivery = Delivery.objects.get(pk=pk)
return HttpResponse(delivery.price)
def search(request):
q = ''
if request.is_ajax():
q = request.GET.get('q')
return render(request, 'up_app/res.html', {'q': q})
def test(request):
# if request == 'POST':
# param = request.GET.get('time','Fail')
return render(request, 'up_app/test.html')
| UTF-8 | Python | false | false | 8,690 | py | 19 | views.py | 10 | 0.556426 | 0.546901 | 0 | 248 | 32.443548 | 117 |
Naxaes/OpenGLpython | 13,924,284,012,596 | eecf965634d32f5bebe346bea45f7a8484614e42 | 9632b023d486ace2be302d7956da5759d95e8a93 | /source/shader.py | 24c7cdf4cb4463f9285dc0e7138d02fa43ed57c5 | []
| no_license | https://github.com/Naxaes/OpenGLpython | cad3e09a51e11c47d72ce9dde53d4fdd70adeeff | de01845f18619cfae8c64ac8a7da4cf1b8f99130 | refs/heads/master | 2022-03-06T07:21:31.016828 | 2022-02-10T11:39:14 | 2022-02-10T11:39:14 | 93,307,461 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyglet.gl import (
glUseProgram,
glUniform4ui, glUniform4ui, glUniform4ui, glUniform4ui,
glUniform1i, glUniform2i, glUniform3i, glUniform4i,
glUniform1d, glUniform2d, glUniform3d, glUniform4d,
glUniform1f, glUniform2f, glUniform3f, glUniform4f,
glUniformMatrix4fv,
GL_TRUE,
GLException,
glCreateShader, GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, glShaderSource, glCompileShader, glCreateProgram,
glAttachShader, glBindAttribLocation, glLinkProgram, glValidateProgram, glGetShaderiv, GL_INFO_LOG_LENGTH,
glGetShaderInfoLog, glGetProgramInfoLog, glGetUniformLocation, glGetProgramiv, GLint, GLfloat,
)
from source.c_bindings import *
class Shader:
bound = None # This is okay if we assume we're only going to need one OpenGL context.
@classmethod
def create(cls, vertex_source, fragment_source, attributes, uniforms):
number_of_string = 1
# Create vertex shader.
vertex_handle = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertex_handle, number_of_string, c_pointer_to_char_pointers(vertex_source), None)
glCompileShader(vertex_handle)
# Create fragment shader.
fragment_handle = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragment_handle, number_of_string, c_pointer_to_char_pointers(fragment_source), None)
glCompileShader(fragment_handle)
# Create attributes.
attribute_mapping = []
for attribute in attributes:
attribute_mapping.append(c_string(attribute))
try:
# Create program.
program_handle = glCreateProgram()
glAttachShader(program_handle, vertex_handle)
glAttachShader(program_handle, fragment_handle)
for index, name in enumerate(attribute_mapping):
glBindAttribLocation(program_handle, index, name)
glLinkProgram(program_handle)
glValidateProgram(program_handle)
glUseProgram(program_handle)
except GLException as error:
# Print vertex shader errors.
status = GLint()
glGetShaderiv(vertex_handle, GL_INFO_LOG_LENGTH, byref(status))
output = create_string_buffer(status.value)
glGetShaderInfoLog(vertex_handle, status, None, output)
print(output.value.decode('utf-8'))
# Print fragment shader errors.
status = GLint()
glGetShaderiv(fragment_handle, GL_INFO_LOG_LENGTH, byref(status))
output = create_string_buffer(status.value)
glGetShaderInfoLog(fragment_handle, status, None, output)
print(output.value.decode('utf-8'))
# Print program errors.
status = GLint()
glGetProgramiv(program_handle, GL_INFO_LOG_LENGTH, byref(status)) # Getting the number of char in info log to 'status'
output = create_string_buffer(status.value) # status.value)
glGetProgramInfoLog(program_handle, status, None, output)
print(output.value.decode('utf-8'))
raise error
# Get uniform location.
uniform_mapping = {}
for uniform in uniforms:
name = c_string(uniform)
location = glGetUniformLocation(program_handle, cast(pointer(name), POINTER(c_char)))
uniform_mapping[uniform] = location
return cls(program_handle, uniform_mapping)
def __init__(self, id_, uniform_mapping):
self.id = id_
self.uniform = uniform_mapping
def enable(self):
glUseProgram(self.id)
Shader.bound = self # Just for safety.
@staticmethod
def disable():
glUseProgram(0)
Shader.bound = None
def is_bound(self):
return Shader.bound is self
def _assert_bound(self):
assert self.is_bound(), "Must bind this shader ({}) before being able to load uniform.".format(self.id)
def load(self):
pass
def load_uniform_matrix(self, **uniforms):
self._assert_bound()
for name, data in uniforms.items():
glUniformMatrix4fv(self.uniform[name], 1, GL_TRUE, data.ctypes.data_as(POINTER(GLfloat)))
def load_uniform_floats(self, **uniforms):
self._assert_bound()
for name, data in uniforms.items():
if isinstance(data, (float, int)):
glUniform1f(self.uniform[name], data)
else:
functions = glUniform2f, glUniform3f, glUniform4f
functions[len(data) - 2](self.uniform[name], *data)
def load_uniform_sampler(self, **uniforms):
self._assert_bound()
for name, data in uniforms.items():
glUniform1i(self.uniform[name], data)
| UTF-8 | Python | false | false | 4,762 | py | 57 | shader.py | 38 | 0.639647 | 0.633347 | 0 | 136 | 34.014706 | 131 |
Aasthaengg/IBMdataset | 19,576,460,940,013 | 053ad5f5c6cfbde6cd479fb594ef61b9c5ffca95 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03265/s646381713.py | f5e5124423a8bf25647e934365d82cd5639d65ed | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x_1, y_1, x_2, y_2 = map(int, input().split())
vec_x = -(y_2 - y_1)
vec_y = x_2 - x_1
x_3, y_3 = x_2 + vec_x, y_2 + vec_y
x_4, y_4 = x_1 + vec_x, y_1 + vec_y
print(x_3, y_3, x_4, y_4)
| UTF-8 | Python | false | false | 187 | py | 202,060 | s646381713.py | 202,055 | 0.454545 | 0.347594 | 0 | 9 | 19.777778 | 46 |
lovewin99/pyspace | 627,065,259,095 | 2ea50db20e40ff8b1004c20ce8ec3d4555c00aa7 | 74d3f172ce860777b35657f2f08677324a4701d6 | /com/wangxy/finup/AgglomerativeDemo.py | 2b87b42ec436a85d13f6e4829226f73c7d740602 | []
| no_license | https://github.com/lovewin99/pyspace | b825567fffeb963424c374d4539247c6c2087fc0 | ae4675b142fb5a206cc525c681361a9224e88940 | refs/heads/master | 2020-04-06T07:03:28.622896 | 2017-10-27T02:24:07 | 2017-10-27T02:24:07 | 47,744,396 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf8
__author__ = 'wangxy'
import numpy as np
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
yuqi = open("/Users/wangxy/Desktop/taobao_trans_repaid_feature.info")
noyuqi = open("/Users/wangxy/Desktop/taobao_trans_over_dure_feature.info")
yuqiAppNo = []
mixdata = []
for fr in yuqi.readlines():
strArr = fr.strip().split('\001')
yuqiAppNo.append(strArr[0])
mixdata.append(strArr[1:])
yuqilen = len(mixdata)
noyuqiAppNo = []
for fr in noyuqi.readlines():
strArr = fr.strip().split('\001')
noyuqiAppNo.append(strArr[0])
mixdata.append(strArr[1:])
noyuqilen = len(mixdata) - yuqilen
print mixdata
# np.any(np.isnan(np.array(mixdata)))
# np.all(np.isfinite(mixdata))
# k_means = KMeans(init='k-means++', n_clusters=8, n_init=10)
# k_means.fit(mixdata)
#
# k_means_labels = pairwise_distances_argmin(mixdata, k_means.cluster_centers_)
#
model = AgglomerativeClustering(affinity="cosine",n_clusters=10,linkage="complete")
y = model.fit_predict(mixdata)
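# Split the predicted cluster labels back into the two input groups
# (the first yuqilen rows came from the first file, the rest from the second)
# so their cluster distributions can be compared below.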
a = y[:yuqilen]
b = y[yuqilen:]
# a = k_means_labels[:yuqilen]
# b = k_means_labels[yuqilen:]
#
# a = k_means_labels[:yuqilen]
# b = k_means_labels[yuqilen:]
#
print 'len=',len(a),' 1=',len([i for i in a if i == 1]),' 0=',len([i for i in a if i == 0]),' 2=',len([i for i in a if i == 2]),' 3=',len([i for i in a if i == 3]),' 4=',len([i for i in a if i == 4]),' 5=',len([i for i in a if i == 5]),' 6=',len([i for i in a if i == 6]),' 7=',len([i for i in a if i == 7]),' 8=',len([i for i in a if i == 8]),' 9=',len([i for i in a if i == 9]),' 10=',len([i for i in a if i == 10])
print 'len=',len(b),' 1=',len([i for i in b if i == 1]),' 0=',len([i for i in b if i == 0]),' 2=',len([i for i in b if i == 2]),' 3=',len([i for i in b if i == 3]),' 4=',len([i for i in b if i == 4]),' 5=',len([i for i in b if i == 5]),' 6=',len([i for i in b if i == 6]),' 7=',len([i for i in b if i == 7]),' 8=',len([i for i in b if i == 8]),' 9=',len([i for i in b if i == 9]),' 10=',len([i for i in b if i == 10])
| UTF-8 | Python | false | false | 2,085 | py | 40 | AgglomerativeDemo.py | 38 | 0.596163 | 0.565468 | 0 | 52 | 39.076923 | 428 |
igorgoncalves/NutriData | 8,873,402,477,038 | d05920c8b884ec227cb0980bf7cc62e4bae8a493 | 6a25ea6835d8c3d9e0411449afc5a1af6fc2cba6 | /server/webapp/commands.py | 37111c568c04d3f0367e92c2f57129542b4c73a1 | []
| no_license | https://github.com/igorgoncalves/NutriData | 8887be76c84e92de2a21bab28cbd7aeb61eeba2f | a77e6645f2ae3cba8aabdea3bb41b4e26218abc4 | refs/heads/master | 2022-12-25T14:24:17.302174 | 2020-06-03T00:31:54 | 2020-06-03T00:31:54 | 161,583,550 | 1 | 1 | null | false | 2022-12-10T16:46:43 | 2018-12-13T04:23:58 | 2020-07-24T00:46:06 | 2022-12-10T16:46:42 | 89,806 | 1 | 0 | 23 | Vue | false | false | import json
import os
from flask_script import Command, Option, commands
from webapp import manager
from webapp.modules.localidade.services import LocalidadeService
from .modules.macroindicadores.services import IndicadorService
from .modules.user.services import UserService
class Initdb(Command):
def run(self):
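        # Load the localidades fixture from carga_localidades.json and create a
        # Localidade record for each entry via the service layer.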
cwd = os.getcwd()
service_localidade = LocalidadeService()
arquivo = open(os.getcwd()+"/app/carga_localidades.json").read()
localidades = json.loads(arquivo)
for localidade in localidades:
localidade = service_localidade.deserialize(localidade)
service_localidade.create(item=localidade)
print("{nome} foi adicionado".format(nome=localidade.nome))
manager.add_command('initdb', Initdb())
class CreateUser(Command):
option_list = (
Option('--username', '-u', dest='username'),
Option('--password', '-p', dest='password'),
Option('--email', '-e', dest='email'),
)
def run(self, username="", password="", email=""):
UserService.create_user(
username=username, password=password, email=email)
manager.add_command('createuser', CreateUser())
| UTF-8 | Python | false | false | 1,199 | py | 67 | commands.py | 47 | 0.673895 | 0.673895 | 0 | 40 | 28.975 | 72 |
jenga/cerebellum | 8,538,395,015,846 | 88fe1e39c34bb0ad1a3130e269256656519480e0 | 1a25ceb3ff12859bc47079da806eab9e3d1bec53 | /models/links.py | a6813a65d4d5a744cc80609e3aedcb6dbd0fc921 | []
| no_license | https://github.com/jenga/cerebellum | 6758a4b06325588666110c6cafe6c4d3e3458fa4 | 387699ca648466a82044913609b2e572e42de737 | refs/heads/master | 2020-06-03T12:52:01.526910 | 2014-05-09T17:36:39 | 2014-05-09T17:36:39 | 9,897,184 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rhesus.models.link import Link
class InhSynapse(Link):
def __init__(self, name, pre, post, g_tau=2.5, a=1.0, f_inc=0.0, d_inc=-0.59,
f_init=1.00, d_init=1.0, f_tau=1.0, d_tau=77.67):
super().__init__(name, pre, post, 'spike', 'inh')
self.g = 0
self.f_init = f_init
self.d_init = d_init
self.f = f_init
self.d = d_init
self.g_tau = g_tau
self.a = a
self.f_inc = f_inc
self.d_inc = d_inc
self.f_tau = f_tau
self.d_tau = d_tau
def transfer(self, dt):
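        # Conductance g decays exponentially with time constant g_tau and jumps by
        # a*f*d whenever the presynaptic unit spikes; f (facilitation) and d
        # (depression) are updated on each spike and then relax back toward their
        # initial values with time constants f_tau and d_tau.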
s = self.pre.getOutput(self.type)
self.g = self.g + (-self.g / self.g_tau) * dt + self.a * self.f * self.d * s
if s > 0.0:
self.f = self.f + self.f_inc * (1 - self.f)
self.d = self.d + self.d_inc * self.d
self.f = self.f + ((self.f_init - self.f) / self.f_tau) * dt
self.d = self.d + ((self.d_init - self.d) / self.d_tau) * dt
return self.g*0.5
class ISource(Link):
def __init__(self, name, pre, post):
super().__init__(name, pre, post, 'I', 'isource')
def transfer(self, dt):
return self.pre.getOutput(self.type)
class NoiseLink(Link):
def __init__(self, name, pre, post):
super().__init__(name, pre, post, 's', 's')
def transfer(self, dt):
return self.pre.getOutput(self.type)
class INoise(Link):
def __init__(self, name, pre, post):
super().__init__(name, pre, post, 'c', 'isource')
def transfer(self, dt):
return self.pre.getOutput(self.type)
| UTF-8 | Python | false | false | 1,593 | py | 18 | links.py | 9 | 0.514752 | 0.498431 | 0 | 52 | 29.538462 | 84 |
luckcul/BUAA-Score-Notification | 566,935,706,685 | 8143ea99324b1081cd7369b1e784d19dee5bfeb0 | a9a90d37fc9e4c1487fe1f1f7cbd02a5cddfcd5a | /login.py | 0d690baa8b311cfb7df72977005478c103af27ec | []
| no_license | https://github.com/luckcul/BUAA-Score-Notification | e2f8c8164c329967c79bbc6b3cbb08f3dbb7950b | 2d023f065cb67d023f795ddeb87194a48c767dba | refs/heads/master | 2021-01-11T21:49:08.173823 | 2017-02-27T07:03:50 | 2017-02-27T07:03:50 | 78,857,525 | 2 | 1 | null | false | 2017-02-22T01:52:21 | 2017-01-13T14:44:03 | 2017-02-14T07:56:42 | 2017-02-22T01:52:21 | 5 | 2 | 1 | 0 | Python | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-01-12 19:33:33
# @Author : luckcul (tyfdream@gmail.com)
# @Version : 2.7.12
import os
import urllib
import urllib2
import cookielib
import re
from ocr import ocr
import socket
headers = {
'Host':"gsmis.graduate.buaa.edu.cn",
'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0",
'Accept':"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
'Accept-Language':"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
'Referer':"http://gsmis.graduate.buaa.edu.cn/gsmis/indexAction.do",
'Connection':"keep-alive",
'Upgrade-Insecure-Requests':"1",
'Cookie' : ''
}
postdata = {
'id' : '',
'password' : '',
'checkcode' : ''
}
#create cookie
cookjar = cookielib.LWPCookieJar()
cookie_support = urllib2.HTTPCookieProcessor(cookjar)
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
#set socket timeout
socket.setdefaulttimeout(15)
def login(user, password):
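    # Login flow: fetch the CAPTCHA image, OCR it, then POST the user id,
    # password and recognized check code along with the session cookie that
    # the opener captured.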
# print user, password
hostUrl = r'gsmis.graduate.buaa.edu.cn'
postUrl = r'http://gsmis.graduate.buaa.edu.cn/gsmis/indexAction.do'
postdata['id'] = user
postdata['password'] = password
url1 = r'http://gsmis.graduate.buaa.edu.cn/gsmis/Image.do'
req = urllib2.Request(url1)
res = urllib2.urlopen(req)
pic = res.read()
save = open(r'image.jpg', 'wb')
save.write(pic)
save.close()
# os.system('image.jpg')
# verificationCode = raw_input()
verificationCode = ocr('image.jpg')
postdata['checkcode'] = verificationCode
# print verificationCode
s = str(cookjar)
#add cookie to headers
rule = r'Cookie (.*) for'
cookieID = re.findall(rule, s)[0]
headers['Cookie'] = cookieID
#post data
POSTDATA = urllib.urlencode(postdata)
reqq = urllib2.Request(postUrl, POSTDATA, headers)
ress = urllib2.urlopen(reqq)
wow = r'http://gsmis.graduate.buaa.edu.cn/gsmis/toModule.do?prefix=/py&page=/pySelectCourses.do?do=xsXuanKe'
req1 = urllib2.Request(wow, None ,headers)
res1 = urllib2.urlopen(req1)
def getScoreHtml():
url2 = r'http://gsmis.graduate.buaa.edu.cn/gsmis/py/pyYiXuanKeCheng.do'
req2 = urllib2.Request(url2, None, headers)
res2 = urllib2.urlopen(req2)
con = res2.read()
con = ''.join(con.split('\n'))
# print len(con)
if len(con) < 10000 :
return False
handlee = file('a.html', 'w')
handlee.write(con)
handlee.close()
return True
def getHtml(user, password):
while getScoreHtml() == False:
login(user, password)
print 'x', | UTF-8 | Python | false | false | 2,480 | py | 7 | login.py | 5 | 0.697177 | 0.66371 | 0 | 93 | 25.677419 | 109 |
zensen6/album_practice | 14,800,457,340,342 | ae8180b0bd8dbe1c5ef416b2f53395dddae5672b | 86c1d35377a12abb8610d8f8742035b3c6478158 | /main/migrations/0004_auto_20200728_1656.py | 512333389c79e1ff3a720d068e2f384c167fc993 | []
| no_license | https://github.com/zensen6/album_practice | cc21f05ce241a79edcd7816e6113ddcc531f63c8 | bcb37642789dda38835bd8029ad2df3e97cb9498 | refs/heads/master | 2023-03-26T18:03:52.396768 | 2021-03-15T11:55:46 | 2021-03-15T11:55:46 | 347,950,171 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.8 on 2020-07-28 11:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20200728_1651'),
]
operations = [
migrations.RemoveField(
model_name='song',
name='artist',
),
migrations.AddField(
model_name='song',
name='artist',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Album'),
),
]
| UTF-8 | Python | false | false | 595 | py | 14 | 0004_auto_20200728_1656.py | 11 | 0.566387 | 0.514286 | 0 | 23 | 23.869565 | 109 |
ryangniadek/.dotfiles | 6,330,781,820,862 | fc13befdc3c16edfe58682b09cd455702b45dcc9 | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/stdlib/2and3/distutils/msvccompiler.pyi | 304b68356c56237cd4039ef59a19a290cdcc1a0c | [
"MIT",
"Apache-2.0"
]
| permissive | https://github.com/ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | false | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | 2020-03-22T20:39:47 | 2020-09-12T17:28:00 | 991,651 | 0 | 0 | 4 | Python | false | false | # Stubs for distutils.msvccompiler
from distutils.ccompiler import CCompiler
class MSVCCompiler(CCompiler): ...
| UTF-8 | Python | false | false | 121 | pyi | 379 | msvccompiler.pyi | 278 | 0.760331 | 0.760331 | 0 | 6 | 18.166667 | 41 |
Kai-Eiji/Intern-Info-App | 18,975,165,520,277 | 73371cac8750be161c812265ce931b17f0e97fb5 | 6f0b3140dc546afcf90b10c2c239c1bf09e59244 | /django_react_proj/students/migrations/0008_auto_20200525_1039.py | fc0de9dc9d2a76d183c406d73071b7df6e5c4ca2 | []
| no_license | https://github.com/Kai-Eiji/Intern-Info-App | 89713a44ac24df041b22d9afb9d56b669ca66bae | 7f4650151ae1ca1c66a084f2fd45ec590a13caf5 | refs/heads/master | 2023-05-15T05:43:06.575548 | 2021-06-07T02:23:31 | 2021-06-07T02:23:31 | 372,345,291 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.6 on 2020-05-25 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0007_auto_20200525_0949'),
]
operations = [
migrations.AlterField(
model_name='student',
name='prev_exp_num',
field=models.CharField(max_length=10),
),
]
| UTF-8 | Python | false | false | 394 | py | 21 | 0008_auto_20200525_1039.py | 21 | 0.588832 | 0.505076 | 0 | 18 | 20.888889 | 50 |
js4683/CS172 | 14,628,658,610,473 | 50af56058fcd7009b25ee64b093b7dbca1f65ea3 | b52ec6344dd3156e882fdf5a751656cc768a015e | /Week 5/Lab5-1.py | 7a8f8778f389e51872649a502d1fe436f19f0d6c | []
| no_license | https://github.com/js4683/CS172 | 0989169aec04ff9ebb5734b6067c7e63e6a13af2 | dbd6f933816725e5492264a6d874e3bb1306e8bb | refs/heads/master | 2022-02-28T16:13:45.640423 | 2019-11-05T15:17:41 | 2019-11-05T15:17:41 | 219,775,054 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
import random
from Drawable import Rectangle, Snowflake
pygame.init()
surface = pygame.display.set_mode((600,500))
pygame.display.set_caption("Lab 5")
drawables = []
green = Rectangle(0, 200 ,600 ,300 , (0,128,0))
sky = Rectangle(0, 0, 600, 300, (0,0,205))
snow = Snowflake()
drawables.append(green)
drawables.append(sky)
drawables.append(snow)
for i in range(0, 100):
newSnow = Snowflake()
newSnow.setLoc((random.randint(0, 600), random.randint(0,600)))
drawables.append(newSnow)
counter = 0
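# counter tracks spacebar presses; while it is even the snowflake positions are
# re-randomized every frame, so pressing space toggles the re-randomization on and off.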
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
counter += 1
if counter %2 == 0:
for drawable in drawables:
if isinstance(drawable, Snowflake) == True:
drawable.setLoc((random.randint(0, 600), random.randint(0, 600)))
drawable.draw(surface)
else:
drawable.draw(surface)
pygame.display.update()
| UTF-8 | Python | false | false | 1,055 | py | 29 | Lab5-1.py | 28 | 0.62654 | 0.570616 | 0 | 33 | 30.969697 | 81 |
dimkoug/taskproject2 | 19,241,453,505,140 | 87ad890f00a73847b4144c0107bac6a9e8bcc84e | 133c523977c785ce35aef5f45e2ac52795114bca | /taskproject2/projects/api/viewsets.py | 27747ec37026c0136c8cdcd54b90de4a49454701 | []
| no_license | https://github.com/dimkoug/taskproject2 | 68f8eb2a62a345f013d9957bd26a59ed8cee7d83 | 5275e86f5e37adb6e8b0bf8a4766ee9ed715122d | refs/heads/master | 2023-03-17T08:51:34.077300 | 2023-03-11T19:08:39 | 2023-03-12T08:27:51 | 41,932,817 | 2 | 0 | null | false | 2022-11-22T07:54:10 | 2015-09-04T18:54:36 | 2021-06-09T07:09:38 | 2022-11-22T07:54:07 | 4,658 | 1 | 0 | 3 | JavaScript | false | false | from rest_framework import authentication, permissions, viewsets, filters
from django_filters import rest_framework as django_filters
from ..models import Category, Project, Task
from .serializers import CategorySerializer, ProjectSerializer, TaskSerializer
class CategoryViewSet(viewsets.ModelViewSet):
"""API endpoint for listing and creating categories."""
queryset = Category.objects.order_by('title')
serializer_class = CategorySerializer
search_fields = ('title', )
ordering_fields = ('title', )
class ProjectViewSet(viewsets.ModelViewSet):
"""API endpoint for listing and creating projects."""
queryset = Project.objects.order_by('title')
serializer_class = ProjectSerializer
search_fields = ('category', 'title', )
ordering_fields = ('title', )
class TaskViewSet(viewsets.ModelViewSet):
"""API endpoint for listing and creating tasks."""
queryset = Task.objects.order_by('title')
serializer_class = TaskSerializer
search_fields = ('project', 'title', )
ordering_fields = ('title', )
| UTF-8 | Python | false | false | 1,062 | py | 79 | viewsets.py | 34 | 0.725047 | 0.725047 | 0 | 33 | 31.181818 | 78 |
YogeshMorpho/Reuters-newswire-class-dataset-sample | 18,554,258,735,572 | 1ccebf68c1e2a97dac7a93f5f2520ef5ebd01105 | c7effa1c7dd34d785aad367bd6d747c549351f41 | /Reuters newswire classification sample.py | a1a5af9f705796bbe5e1a39ccf2fb4830d2f6a93 | []
| no_license | https://github.com/YogeshMorpho/Reuters-newswire-class-dataset-sample | 21089a458fef06e027e393e3cf670688adfc0d4f | 77210e34eb7eb9cbd950cdcf35e76b7992296d6f | refs/heads/master | 2020-03-27T20:41:28.560162 | 2018-09-09T10:47:53 | 2018-09-09T10:47:53 | 147,086,833 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import keras
#import keras's Reuters Neswire Classification Dataset
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
'''
Loading the data set and dividing it into train and test, and limiting the vocabulary
to the max_words most frequent words to keep the dataset manageable and faster to run.
'''
max_words=2000
print('Loading data')
print()
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words,seed=45,test_split=0.3)
print('x_train values',x_train[0:10])
print('x_test values',x_test[0:10])
print('y_train values',y_train[0:10])
print('y_test values',y_test[0:10])
print('length of x_train sequences',len(x_train))
print('length of x_test sequences',len(x_test))
'''
Now we are calculating the number of classes because it is required for categorization.
We are adding one here because the class indexing starts from zero.
'''
num_classes = np.max(y_train) + 1
print()
print(num_classes, 'classes')
print()
'''
We are using the Tokenizer API here and its sequences_to_matrix function.
We initialize the model by using the Sequential() function and then keep adding layers to this model.
'''
print()
print('Sequence that has to be vectorized')
tokenizer = Tokenizer(num_words=max_words)
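# sequences_to_matrix (default mode='binary') turns each list of word indices into a
# fixed-length bag-of-words vector of size max_words.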
x_train = tokenizer.sequences_to_matrix(x_train)
x_test = tokenizer.sequences_to_matrix(x_test)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print("************")
print()
#Now x being vectorised we have to vectorize y in a 1D matrix
print('*******************')
print('Converting the class vector to binary class matrix')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
print('*******************')
print()
'''
Now we build the fully connected layer with 700 hidden units
'''
print()
print('Building model')
model = Sequential()
model.add(Dense(700, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
#Now compiling the model
print('Compiling model')
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
print('Fitting the data to the model')
batch_size = 20
epochs = 5
#We are giving the number of epochs as 5, as we don't want the model to over-train, which might hurt the accuracy
history = model.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_split=0.1)
print('Evaluating the test data on the model')
score = model.evaluate(x_test, y_test,batch_size=batch_size, verbose=1)
print('Test accuracy:', score[1]) | UTF-8 | Python | false | false | 2,767 | py | 1 | Reuters newswire classification sample.py | 1 | 0.735454 | 0.719913 | 0 | 75 | 35.906667 | 122 |
M1koto/sc-backend | 352,187,344,078 | 6eb5e995021b6591cafa0dd7ad01d96d1d9f8fed | 40264435d0a9cd3474cef1b5df8977e845b749d0 | /models/metadata.py | 95fc99b63fcc78debe5c55c2f59a79017516cae4 | [
"MIT"
]
| permissive | https://github.com/M1koto/sc-backend | cb38e7aec850874ef35cdce306212a98e4fb1adf | 8faeaec0d7bc4c0308e872b9972bccfabeae0402 | refs/heads/master | 2023-07-26T07:31:07.507834 | 2021-05-30T06:11:39 | 2021-06-01T02:39:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mongoengine as mongo
import mongoengine_goodjson as gj
class Tag(gj.Document):
id = mongo.IntField(required=True, primary_key=True)
name = mongo.StringField(required=True)
meta = {'auto_create_index': False}
class NumUsersTag(gj.Document):
id = mongo.IntField(required=True, primary_key=True)
value = mongo.StringField(required=True)
meta = {'auto_create_index': False}
class Major(gj.Document):
id = mongo.IntField(required=True, primary_key=True)
major = mongo.StringField(required=True)
meta = {'auto_create_index': False}
class Minor(gj.Document):
id = mongo.IntField(required=True, primary_key=True)
minor = mongo.StringField(required=True)
meta = {'auto_create_index': False}
class StudentYear(gj.Document):
id = mongo.IntField(required=True, primary_key=True)
year = mongo.StringField(required=True)
meta = {'auto_create_index': False}
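# Illustrative usage sketch (added; not part of the original module), assuming a
# mongoengine connection has been registered elsewhere (e.g. mongo.connect(...)):
#   Tag(id=1, name="engineering").save()
#   Major.objects(major__icontains="computer")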
| UTF-8 | Python | false | false | 935 | py | 36 | metadata.py | 28 | 0.696257 | 0.696257 | 0 | 32 | 28.21875 | 59 |
kaushikriya/Python-Machine-Learning | 16,887,811,407,980 | 624c19e4953fb9e4664712026839b7e228dc36c0 | 3430896f91092b3f8ea800a86a226ab9085afd72 | /SVMHeart.py | 562ab6fe118a78178d08ced0995eef3c5c6fe449 | []
| no_license | https://github.com/kaushikriya/Python-Machine-Learning | 65c3d7fe7a776bcfbab8828e54e1c297d4823dcc | fe27af73aad8ef345b5db74cf3dec9fa77756139 | refs/heads/master | 2020-04-27T09:30:54.748195 | 2019-03-06T21:09:41 | 2019-03-06T21:09:41 | 174,217,737 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
#Loading data
data=pd.read_csv(r'D:\heart.csv')  # raw string so the backslash in the Windows path is not treated as an escape
#Creating feature and target data sets
X=data[['age','sex','cp','trestbps','chol','thalach']].values
Y=data[['target']].values
print(data.dtypes)
#Creating training and testing data sets
from sklearn.model_selection import train_test_split
XTrain,XTest,YTrain,YTest=train_test_split(X,Y,test_size=0.2,random_state=4)
print('Shape of training set: ',XTrain.shape,YTrain.shape)
print('Shape of testing set: ',XTest.shape,YTest.shape)
#Preparing model
from sklearn import svm
Model=svm.SVC(kernel='rbf')
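#Note (added): an RBF-kernel SVC is sensitive to feature scale, so scaling the inputs
#(e.g. with sklearn.preprocessing.StandardScaler, ideally inside a Pipeline) typically
#improves results; sklearn also expects 1-D labels, so YTrain.ravel() avoids a warning.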
Model.fit(XTrain,YTrain)
#Using model for prediction
result=Model.predict(XTest)
#Evaluation of accuracy
from sklearn.metrics import jaccard_similarity_score
print('Jaccard similarity score of training set: ',jaccard_similarity_score(YTrain,Model.predict(XTrain)))
print('Jaccard similarity score of testing set: ',jaccard_similarity_score(YTest,result))
from sklearn.metrics import f1_score
print('F1 score of training set: ',f1_score(YTrain,Model.predict(XTrain),average='weighted'))
print('F1 score of testing set: ',f1_score(YTest,result,average='weighted')) | UTF-8 | Python | false | false | 1,135 | py | 6 | SVMHeart.py | 5 | 0.772687 | 0.765639 | 0 | 33 | 33.424242 | 105 |
rafaelperazzo/programacao-web | 9,045,201,163,975 | 63ee1b3ca2634845ec897172ef9364960ecc0c7a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/111/usersdata/165/63648/submittedfiles/av2_p3_m2.py | 0fa499fecc667148e4d9a53f16b17c549fe6dce6 | []
| no_license | https://github.com/rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
def descobrirm(a):
s1=0
s2=0
s3=0
for i in range(0,a.shape[0],1):
s1=s1+a[i,0]
s2=s2+a[i,1]
s3=s3+a[i,2]
if s1==s2:
return(s1)
else:
return(s3)
def somalinhaerrada(a,m):
b=[]
for i in range(0,a.shape[0],1):
soma=0
for j in range(0,a.shape[1],1):
soma=soma+a[i,j]
b.append(soma)
    for r in range(0,len(b),1):
        if b[r]!=m:
            return r
def somacolunaerrada(a,m):
b=[]
for j in range(0,a.shape[1],1):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,j]
b.append(soma)
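    # Added completion (not in the original submission): mirror somalinhaerrada and
    # return the index of the column whose sum differs from m.
    for r in range(0,len(b),1):
        if b[r]!=m:
            return r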
| UTF-8 | Python | false | false | 718 | py | 17,467 | av2_p3_m2.py | 16,712 | 0.447075 | 0.392758 | 0 | 34 | 19.588235 | 39 |
h3lio5/MetaDropout-pytorch | 6,777,458,412,010 | 548e4dc63fb8297d892e8822b5b879cebe930053 | 2525db70007efd777226132745d102cde330e2c3 | /utils.py | 21966674bc8e22dd6d86ca85db6685be06972f30 | []
| no_license | https://github.com/h3lio5/MetaDropout-pytorch | 1fa4d65707bffe3b19e9757d6d423026cea4f25a | a0a6532b601dfc296bdbaebfc5d66881d93f6582 | refs/heads/main | 2023-03-23T18:44:32.339438 | 2021-03-12T10:46:08 | 2021-03-12T10:46:08 | 330,917,409 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from model import MetaDropout
import torch
from torchmeta.modules import MetaModule
from main import get_args
from collections import OrderedDict
def compute_accuracy(logits, targets):
"""Compute the accuracy"""
with torch.no_grad():
_, predictions = torch.max(logits, dim=1)
accuracy = torch.mean(predictions.eq(targets).float())
return accuracy.item()
def tensors_to_device(tensors, device=torch.device('cpu')):
"""Place a collection of tensors in a specific device"""
if isinstance(tensors, torch.Tensor):
return tensors.to(device=device)
elif isinstance(tensors, (list, tuple)):
return type(tensors)(tensors_to_device(tensor, device=device)
for tensor in tensors)
elif isinstance(tensors, (dict, OrderedDict)):
return type(tensors)([(name, tensors_to_device(tensor, device=device))
for (name, tensor) in tensors.items()])
else:
raise NotImplementedError()
def gradient_update_parameters(model,
loss,
params=None,
step_size=0.5,
first_order=False):
"""Update of the meta-parameters with one step of gradient descent on the
loss function.
"""
if not isinstance(model, MetaModule):
raise ValueError('The model must be an instance of `torchmeta.modules.'
'MetaModule`, got `{0}`'.format(type(model)))
if params is None:
params = OrderedDict(model.meta_named_parameters())
    # create_graph is only needed when higher-order gradients must flow through this
    # update; a first-order approximation (first_order=True) can skip building it.
    grads = torch.autograd.grad(loss, params.values(), create_graph=not first_order)
updated_params = OrderedDict()
for (name, param), grad in zip(params.items(), grads):
if name.find('noise') == -1:
updated_params[name] = param - step_size * grad
else:
updated_params[name] = param
return updated_params
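# Illustrative usage sketch (added; not part of the original file). Assuming `model` is a
# torchmeta MetaModule and (inputs, targets) form a support set, a MAML-style inner-loop
# adaptation followed by an adapted forward pass would look roughly like:
#
#     logits = model(inputs)
#     inner_loss = torch.nn.functional.cross_entropy(logits, targets)
#     adapted_params = gradient_update_parameters(model, inner_loss, step_size=0.4)
#     query_logits = model(query_inputs, params=adapted_params)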
| UTF-8 | Python | false | false | 2,002 | py | 7 | utils.py | 5 | 0.607393 | 0.604895 | 0 | 57 | 34.122807 | 79 |
lianilychee/SoftwareDesign | 14,302,241,140,111 | 5c2e764d08771b0b6515e8c243ccf6295b3865e5 | cea5a7b36f871614122958537c1f3c2b35f858a3 | /hw2/fermat.py | 84937c60213f6490752f1dd11aa6e2ae09c2e91f | [
"MIT"
]
| permissive | https://github.com/lianilychee/SoftwareDesign | 9b57a7e5ff299ad647f9222f82452708070898b9 | 24e43fc5b7dd36581f011350ae9bcbe0b630373d | refs/heads/master | 2021-01-17T08:02:25.836892 | 2014-11-11T19:10:52 | 2014-11-11T19:10:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Liani Lye, HW 02
SoftDes Fall '14
Due 09.16.14
'''
a = int(raw_input("enter value for a: "))
b = int(raw_input("enter value for b: "))
c = int(raw_input("enter value for c: "))
n = int(raw_input("enter value for n: "))
def check_fermat():
if (a**n + b**n) == c**n:
print "Fermat's theorem holds."
else:
print "That doesn't work."
check_fermat() | UTF-8 | Python | false | false | 361 | py | 30 | fermat.py | 15 | 0.603878 | 0.576177 | 0 | 18 | 19.111111 | 42 |
cosmicBboy/ml-research | 3,745,211,488,681 | ee18f724cda696fa076c89bfa39e4d207cb3dad3 | bae5f696b76af428fb5555c147c4f1bcff1bb62e | /metalearn/metalearn/data_environments/feature_maps/kaggle_homesite_quote_conversion.py | 40fb334f56ddf6ec1b364a96b45e82584283c5cc | [
"MIT"
]
| permissive | https://github.com/cosmicBboy/ml-research | 1e309f881f9810e7a82a262d625db5d684752705 | 04fd31f68e7a44152caf6eaaf66ab59f136dd8f5 | refs/heads/master | 2021-01-24T09:58:25.662826 | 2020-08-10T22:08:23 | 2020-08-10T22:08:23 | 123,030,133 | 8 | 4 | MIT | false | 2019-06-29T20:13:37 | 2018-02-26T21:03:02 | 2019-06-24T00:55:51 | 2019-06-29T20:13:37 | 52,791 | 6 | 0 | 1 | Jupyter Notebook | false | false | """Feature map for the kaggle competition 'homesite quote conversion."""
from collections import OrderedDict
from ...data_types import FeatureType
def get_feature_map():
return OrderedDict([
("Original_Quote_Date", FeatureType.DATE),
("Field6", FeatureType.CATEGORICAL),
("Field7", FeatureType.CONTINUOUS),
("Field8", FeatureType.CONTINUOUS),
("Field9", FeatureType.CONTINUOUS),
("Field10", FeatureType.CONTINUOUS),
("Field11", FeatureType.CONTINUOUS),
("Field12", FeatureType.CATEGORICAL),
("CoverageField1A", FeatureType.CONTINUOUS),
("CoverageField1B", FeatureType.CONTINUOUS),
("CoverageField2A", FeatureType.CONTINUOUS),
("CoverageField2B", FeatureType.CONTINUOUS),
("CoverageField3A", FeatureType.CONTINUOUS),
("CoverageField3B", FeatureType.CONTINUOUS),
("CoverageField4A", FeatureType.CONTINUOUS),
("CoverageField4B", FeatureType.CONTINUOUS),
("CoverageField5A", FeatureType.CONTINUOUS),
("CoverageField5B", FeatureType.CONTINUOUS),
("CoverageField6A", FeatureType.CONTINUOUS),
("CoverageField6B", FeatureType.CONTINUOUS),
("CoverageField8", FeatureType.CATEGORICAL),
("CoverageField9", FeatureType.CATEGORICAL),
("CoverageField11A", FeatureType.CONTINUOUS),
("CoverageField11B", FeatureType.CONTINUOUS),
("SalesField1A", FeatureType.CONTINUOUS),
("SalesField1B", FeatureType.CONTINUOUS),
("SalesField2A", FeatureType.CONTINUOUS),
("SalesField2B", FeatureType.CONTINUOUS),
("SalesField3", FeatureType.CONTINUOUS),
("SalesField4", FeatureType.CONTINUOUS),
("SalesField5", FeatureType.CONTINUOUS),
("SalesField6", FeatureType.CONTINUOUS),
("SalesField7", FeatureType.CATEGORICAL),
("SalesField8", FeatureType.CONTINUOUS),
("SalesField9", FeatureType.CONTINUOUS),
("SalesField10", FeatureType.CONTINUOUS),
("SalesField11", FeatureType.CONTINUOUS),
("SalesField12", FeatureType.CONTINUOUS),
("SalesField13", FeatureType.CONTINUOUS),
("SalesField14", FeatureType.CONTINUOUS),
("SalesField15", FeatureType.CONTINUOUS),
("PersonalField1", FeatureType.CONTINUOUS),
("PersonalField2", FeatureType.CONTINUOUS),
("PersonalField4A", FeatureType.CONTINUOUS),
("PersonalField4B", FeatureType.CONTINUOUS),
("PersonalField5", FeatureType.CONTINUOUS),
("PersonalField6", FeatureType.CONTINUOUS),
("PersonalField7", FeatureType.CATEGORICAL),
("PersonalField8", FeatureType.CONTINUOUS),
("PersonalField9", FeatureType.CONTINUOUS),
("PersonalField10A", FeatureType.CONTINUOUS),
("PersonalField10B", FeatureType.CONTINUOUS),
("PersonalField11", FeatureType.CONTINUOUS),
("PersonalField12", FeatureType.CONTINUOUS),
("PersonalField13", FeatureType.CONTINUOUS),
("PersonalField14", FeatureType.CONTINUOUS),
("PersonalField15", FeatureType.CONTINUOUS),
("PersonalField16", FeatureType.CATEGORICAL),
("PersonalField17", FeatureType.CATEGORICAL),
("PersonalField18", FeatureType.CATEGORICAL),
("PersonalField19", FeatureType.CATEGORICAL),
("PersonalField22", FeatureType.CONTINUOUS),
("PersonalField23", FeatureType.CONTINUOUS),
("PersonalField24", FeatureType.CONTINUOUS),
("PersonalField25", FeatureType.CONTINUOUS),
("PersonalField26", FeatureType.CONTINUOUS),
("PersonalField27", FeatureType.CONTINUOUS),
("PersonalField28", FeatureType.CONTINUOUS),
("PersonalField29", FeatureType.CONTINUOUS),
("PersonalField30", FeatureType.CONTINUOUS),
("PersonalField31", FeatureType.CONTINUOUS),
("PersonalField32", FeatureType.CONTINUOUS),
("PersonalField33", FeatureType.CONTINUOUS),
("PersonalField34", FeatureType.CONTINUOUS),
("PersonalField35", FeatureType.CONTINUOUS),
("PersonalField36", FeatureType.CONTINUOUS),
("PersonalField37", FeatureType.CONTINUOUS),
("PersonalField38", FeatureType.CONTINUOUS),
("PersonalField39", FeatureType.CONTINUOUS),
("PersonalField40", FeatureType.CONTINUOUS),
("PersonalField41", FeatureType.CONTINUOUS),
("PersonalField42", FeatureType.CONTINUOUS),
("PersonalField43", FeatureType.CONTINUOUS),
("PersonalField44", FeatureType.CONTINUOUS),
("PersonalField45", FeatureType.CONTINUOUS),
("PersonalField46", FeatureType.CONTINUOUS),
("PersonalField47", FeatureType.CONTINUOUS),
("PersonalField48", FeatureType.CONTINUOUS),
("PersonalField49", FeatureType.CONTINUOUS),
("PersonalField50", FeatureType.CONTINUOUS),
("PersonalField51", FeatureType.CONTINUOUS),
("PersonalField52", FeatureType.CONTINUOUS),
("PersonalField53", FeatureType.CONTINUOUS),
("PersonalField54", FeatureType.CONTINUOUS),
("PersonalField55", FeatureType.CONTINUOUS),
("PersonalField56", FeatureType.CONTINUOUS),
("PersonalField57", FeatureType.CONTINUOUS),
("PersonalField58", FeatureType.CONTINUOUS),
("PersonalField59", FeatureType.CONTINUOUS),
("PersonalField60", FeatureType.CONTINUOUS),
("PersonalField61", FeatureType.CONTINUOUS),
("PersonalField62", FeatureType.CONTINUOUS),
("PersonalField63", FeatureType.CONTINUOUS),
("PersonalField64", FeatureType.CONTINUOUS),
("PersonalField65", FeatureType.CONTINUOUS),
("PersonalField66", FeatureType.CONTINUOUS),
("PersonalField67", FeatureType.CONTINUOUS),
("PersonalField68", FeatureType.CONTINUOUS),
("PersonalField69", FeatureType.CONTINUOUS),
("PersonalField70", FeatureType.CONTINUOUS),
("PersonalField71", FeatureType.CONTINUOUS),
("PersonalField72", FeatureType.CONTINUOUS),
("PersonalField73", FeatureType.CONTINUOUS),
("PersonalField74", FeatureType.CONTINUOUS),
("PersonalField75", FeatureType.CONTINUOUS),
("PersonalField76", FeatureType.CONTINUOUS),
("PersonalField77", FeatureType.CONTINUOUS),
("PersonalField78", FeatureType.CONTINUOUS),
("PersonalField79", FeatureType.CONTINUOUS),
("PersonalField80", FeatureType.CONTINUOUS),
("PersonalField81", FeatureType.CONTINUOUS),
("PersonalField82", FeatureType.CONTINUOUS),
("PersonalField83", FeatureType.CONTINUOUS),
("PersonalField84", FeatureType.CATEGORICAL),
("PropertyField1A", FeatureType.CONTINUOUS),
("PropertyField1B", FeatureType.CONTINUOUS),
("PropertyField2A", FeatureType.CONTINUOUS),
("PropertyField2B", FeatureType.CONTINUOUS),
("PropertyField3", FeatureType.CATEGORICAL),
("PropertyField4", FeatureType.CATEGORICAL),
("PropertyField5", FeatureType.CATEGORICAL),
("PropertyField6", FeatureType.CONTINUOUS),
("PropertyField7", FeatureType.CATEGORICAL),
("PropertyField8", FeatureType.CONTINUOUS),
("PropertyField9", FeatureType.CONTINUOUS),
("PropertyField10", FeatureType.CONTINUOUS),
("PropertyField11A", FeatureType.CONTINUOUS),
("PropertyField11B", FeatureType.CONTINUOUS),
("PropertyField12", FeatureType.CONTINUOUS),
("PropertyField13", FeatureType.CONTINUOUS),
("PropertyField14", FeatureType.CATEGORICAL),
("PropertyField15", FeatureType.CONTINUOUS),
("PropertyField16A", FeatureType.CONTINUOUS),
("PropertyField16B", FeatureType.CONTINUOUS),
("PropertyField17", FeatureType.CONTINUOUS),
("PropertyField18", FeatureType.CONTINUOUS),
("PropertyField19", FeatureType.CONTINUOUS),
("PropertyField20", FeatureType.CONTINUOUS),
("PropertyField21A", FeatureType.CONTINUOUS),
("PropertyField21B", FeatureType.CONTINUOUS),
("PropertyField22", FeatureType.CONTINUOUS),
("PropertyField23", FeatureType.CONTINUOUS),
("PropertyField24A", FeatureType.CONTINUOUS),
("PropertyField24B", FeatureType.CONTINUOUS),
("PropertyField25", FeatureType.CONTINUOUS),
("PropertyField26A", FeatureType.CONTINUOUS),
("PropertyField26B", FeatureType.CONTINUOUS),
("PropertyField27", FeatureType.CONTINUOUS),
("PropertyField28", FeatureType.CATEGORICAL),
("PropertyField29", FeatureType.CATEGORICAL),
("PropertyField30", FeatureType.CATEGORICAL),
("PropertyField31", FeatureType.CATEGORICAL),
("PropertyField32", FeatureType.CATEGORICAL),
("PropertyField33", FeatureType.CATEGORICAL),
("PropertyField34", FeatureType.CATEGORICAL),
("PropertyField35", FeatureType.CONTINUOUS),
("PropertyField36", FeatureType.CATEGORICAL),
("PropertyField37", FeatureType.CATEGORICAL),
("PropertyField38", FeatureType.CATEGORICAL),
("PropertyField39A", FeatureType.CONTINUOUS),
("PropertyField39B", FeatureType.CONTINUOUS),
("GeographicField1A", FeatureType.CONTINUOUS),
("GeographicField1B", FeatureType.CONTINUOUS),
("GeographicField2A", FeatureType.CONTINUOUS),
("GeographicField2B", FeatureType.CONTINUOUS),
("GeographicField3A", FeatureType.CONTINUOUS),
("GeographicField3B", FeatureType.CONTINUOUS),
("GeographicField4A", FeatureType.CONTINUOUS),
("GeographicField4B", FeatureType.CONTINUOUS),
("GeographicField5A", FeatureType.CONTINUOUS),
("GeographicField5B", FeatureType.CONTINUOUS),
("GeographicField6A", FeatureType.CONTINUOUS),
("GeographicField6B", FeatureType.CONTINUOUS),
("GeographicField7A", FeatureType.CONTINUOUS),
("GeographicField7B", FeatureType.CONTINUOUS),
("GeographicField8A", FeatureType.CONTINUOUS),
("GeographicField8B", FeatureType.CONTINUOUS),
("GeographicField9A", FeatureType.CONTINUOUS),
("GeographicField9B", FeatureType.CONTINUOUS),
("GeographicField10A", FeatureType.CONTINUOUS),
("GeographicField10B", FeatureType.CONTINUOUS),
("GeographicField11A", FeatureType.CONTINUOUS),
("GeographicField11B", FeatureType.CONTINUOUS),
("GeographicField12A", FeatureType.CONTINUOUS),
("GeographicField12B", FeatureType.CONTINUOUS),
("GeographicField13A", FeatureType.CONTINUOUS),
("GeographicField13B", FeatureType.CONTINUOUS),
("GeographicField14A", FeatureType.CONTINUOUS),
("GeographicField14B", FeatureType.CONTINUOUS),
("GeographicField15A", FeatureType.CONTINUOUS),
("GeographicField15B", FeatureType.CONTINUOUS),
("GeographicField16A", FeatureType.CONTINUOUS),
("GeographicField16B", FeatureType.CONTINUOUS),
("GeographicField17A", FeatureType.CONTINUOUS),
("GeographicField17B", FeatureType.CONTINUOUS),
("GeographicField18A", FeatureType.CONTINUOUS),
("GeographicField18B", FeatureType.CONTINUOUS),
("GeographicField19A", FeatureType.CONTINUOUS),
("GeographicField19B", FeatureType.CONTINUOUS),
("GeographicField20A", FeatureType.CONTINUOUS),
("GeographicField20B", FeatureType.CONTINUOUS),
("GeographicField21A", FeatureType.CONTINUOUS),
("GeographicField21B", FeatureType.CONTINUOUS),
("GeographicField22A", FeatureType.CONTINUOUS),
("GeographicField22B", FeatureType.CONTINUOUS),
("GeographicField23A", FeatureType.CONTINUOUS),
("GeographicField23B", FeatureType.CONTINUOUS),
("GeographicField24A", FeatureType.CONTINUOUS),
("GeographicField24B", FeatureType.CONTINUOUS),
("GeographicField25A", FeatureType.CONTINUOUS),
("GeographicField25B", FeatureType.CONTINUOUS),
("GeographicField26A", FeatureType.CONTINUOUS),
("GeographicField26B", FeatureType.CONTINUOUS),
("GeographicField27A", FeatureType.CONTINUOUS),
("GeographicField27B", FeatureType.CONTINUOUS),
("GeographicField28A", FeatureType.CONTINUOUS),
("GeographicField28B", FeatureType.CONTINUOUS),
("GeographicField29A", FeatureType.CONTINUOUS),
("GeographicField29B", FeatureType.CONTINUOUS),
("GeographicField30A", FeatureType.CONTINUOUS),
("GeographicField30B", FeatureType.CONTINUOUS),
("GeographicField31A", FeatureType.CONTINUOUS),
("GeographicField31B", FeatureType.CONTINUOUS),
("GeographicField32A", FeatureType.CONTINUOUS),
("GeographicField32B", FeatureType.CONTINUOUS),
("GeographicField33A", FeatureType.CONTINUOUS),
("GeographicField33B", FeatureType.CONTINUOUS),
("GeographicField34A", FeatureType.CONTINUOUS),
("GeographicField34B", FeatureType.CONTINUOUS),
("GeographicField35A", FeatureType.CONTINUOUS),
("GeographicField35B", FeatureType.CONTINUOUS),
("GeographicField36A", FeatureType.CONTINUOUS),
("GeographicField36B", FeatureType.CONTINUOUS),
("GeographicField37A", FeatureType.CONTINUOUS),
("GeographicField37B", FeatureType.CONTINUOUS),
("GeographicField38A", FeatureType.CONTINUOUS),
("GeographicField38B", FeatureType.CONTINUOUS),
("GeographicField39A", FeatureType.CONTINUOUS),
("GeographicField39B", FeatureType.CONTINUOUS),
("GeographicField40A", FeatureType.CONTINUOUS),
("GeographicField40B", FeatureType.CONTINUOUS),
("GeographicField41A", FeatureType.CONTINUOUS),
("GeographicField41B", FeatureType.CONTINUOUS),
("GeographicField42A", FeatureType.CONTINUOUS),
("GeographicField42B", FeatureType.CONTINUOUS),
("GeographicField43A", FeatureType.CONTINUOUS),
("GeographicField43B", FeatureType.CONTINUOUS),
("GeographicField44A", FeatureType.CONTINUOUS),
("GeographicField44B", FeatureType.CONTINUOUS),
("GeographicField45A", FeatureType.CONTINUOUS),
("GeographicField45B", FeatureType.CONTINUOUS),
("GeographicField46A", FeatureType.CONTINUOUS),
("GeographicField46B", FeatureType.CONTINUOUS),
("GeographicField47A", FeatureType.CONTINUOUS),
("GeographicField47B", FeatureType.CONTINUOUS),
("GeographicField48A", FeatureType.CONTINUOUS),
("GeographicField48B", FeatureType.CONTINUOUS),
("GeographicField49A", FeatureType.CONTINUOUS),
("GeographicField49B", FeatureType.CONTINUOUS),
("GeographicField50A", FeatureType.CONTINUOUS),
("GeographicField50B", FeatureType.CONTINUOUS),
("GeographicField51A", FeatureType.CONTINUOUS),
("GeographicField51B", FeatureType.CONTINUOUS),
("GeographicField52A", FeatureType.CONTINUOUS),
("GeographicField52B", FeatureType.CONTINUOUS),
("GeographicField53A", FeatureType.CONTINUOUS),
("GeographicField53B", FeatureType.CONTINUOUS),
("GeographicField54A", FeatureType.CONTINUOUS),
("GeographicField54B", FeatureType.CONTINUOUS),
("GeographicField55A", FeatureType.CONTINUOUS),
("GeographicField55B", FeatureType.CONTINUOUS),
("GeographicField56A", FeatureType.CONTINUOUS),
("GeographicField56B", FeatureType.CONTINUOUS),
("GeographicField57A", FeatureType.CONTINUOUS),
("GeographicField57B", FeatureType.CONTINUOUS),
("GeographicField58A", FeatureType.CONTINUOUS),
("GeographicField58B", FeatureType.CONTINUOUS),
("GeographicField59A", FeatureType.CONTINUOUS),
("GeographicField59B", FeatureType.CONTINUOUS),
("GeographicField60A", FeatureType.CONTINUOUS),
("GeographicField60B", FeatureType.CONTINUOUS),
("GeographicField61A", FeatureType.CONTINUOUS),
("GeographicField61B", FeatureType.CONTINUOUS),
("GeographicField62A", FeatureType.CONTINUOUS),
("GeographicField62B", FeatureType.CONTINUOUS),
("GeographicField63", FeatureType.CATEGORICAL),
("GeographicField64", FeatureType.CATEGORICAL),
])
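# Illustrative aside (added; not part of the original module): a hedged sketch of how a
# feature map like this is typically consumed, e.g. grouping column names by feature type
# before building per-type preprocessing steps.
def split_columns_by_type(feature_map):
    """Return a dict mapping each FeatureType to the list of column names of that type."""
    groups = {}
    for column_name, feature_type in feature_map.items():
        groups.setdefault(feature_type, []).append(column_name)
    return groups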
| UTF-8 | Python | false | false | 16,213 | py | 124 | kaggle_homesite_quote_conversion.py | 78 | 0.685068 | 0.652686 | 0 | 307 | 51.811075 | 72 |
JennifferLockwood/sqlalchemy-challenge | 12,979,391,191,048 | 71cdfdf8440ce3647b6304ee134bab1316e2c8d0 | 4d07711006706655a82a8fdb3809e606751260ad | /climate-app.py | 13517df26402d6eed540329df891ece6df58f20e | []
| no_license | https://github.com/JennifferLockwood/sqlalchemy-challenge | 79077b9ab4fc3c458daf926ca59585c9aba9cb34 | b227048bbbb316359a63250ab8f115960012f2fb | refs/heads/master | 2020-09-12T01:06:22.826627 | 2019-11-23T14:37:33 | 2019-11-23T14:37:33 | 222,250,328 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Welcome to the Hawaii Climate API!<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end><br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return the Precipitation dictionary"""
# Query date and prcp
results = session.query(Measurement.date, Measurement.prcp).\
order_by(Measurement.date.desc()).all()
session.close()
# Create a dictionary from the raw data and append to a list of prcpData
prcpData = []
for date, prcp in results:
prcpDict = {}
prcpDict["date"] = date
prcpDict["prcp"] = prcp
prcpData.append(prcpDict)
return jsonify(prcpData)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all stations"""
# Query all stations
results = session.query(Station.name).all()
session.close()
# Convert list of tuples into normal list
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
# Find the last date in our data
last_date = session.query(Measurement.date).\
order_by(Measurement.date.desc()).first()
# Calculate the date one year ago from the last data point in the database
one_year = dt.date(2017,8,23) - dt.timedelta(days=365)
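    # Added note: `last_date` queried above is not used further; the anchor date below is
    # hardcoded to 2017-08-23, presumably the most recent measurement in this dataset.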
# Perform a query to retrieve the data and precipitation scores
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= str(one_year), Measurement.date <= '2017-08-23').\
order_by(Measurement.date.desc()).all()
session.close()
# Create a dictionary from the raw data and append to a list
tempLastYear = []
for date, tobs in results:
tempDict = {}
tempDict["date"] = date
tempDict["temperature"] = tobs
tempLastYear.append(tempDict)
return jsonify(tempLastYear)
@app.route('/api/v1.0/<start>/')
def startDate(start):
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(Measurement.date, func.avg(Measurement.tobs),
func.max(Measurement.tobs), func.min(Measurement.tobs)).\
filter(Measurement.date == start).all()
session.close()
# Create a dictionary from the raw data and append to a list
startDataList = []
for result in results:
startDataDict = {}
startDataDict['Date'] = result[0]
startDataDict['Average Temperature'] = result[1]
startDataDict['Highest Temperature'] = result[2]
startDataDict['Lowest Temperature'] = result[3]
startDataList.append(startDataDict)
return jsonify(startDataList)
@app.route('/api/v1.0/<start>/<end>/')
def query_dates(start, end):
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs),
func.min(Measurement.tobs)).\
filter(Measurement.date >= start, Measurement.date <= end).all()
session.close()
# Create a dictionary from the raw data and append to a list
datesDataList = []
for result in results:
datesDataDict = {}
datesDataDict["Start Date"] = start
datesDataDict["End Date"] = end
datesDataDict["Average Temperature"] = result[0]
datesDataDict["Highest Temperature"] = result[1]
datesDataDict["Lowest Temperature"] = result[2]
datesDataList.append(datesDataDict)
return jsonify(datesDataList)
if __name__ == '__main__':
app.run(debug=False)
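    # Added usage note (not in the original): with the server running locally, the JSON
    # endpoints can be exercised with curl, e.g.:
    #   curl http://localhost:5000/api/v1.0/precipitation
    #   curl http://localhost:5000/api/v1.0/2016-08-23/2017-08-23/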
| UTF-8 | Python | false | false | 4,958 | py | 2 | climate-app.py | 1 | 0.614361 | 0.605284 | 0 | 167 | 28.688623 | 85 |
EnderDas/The-Mage | 10,256,381,938,241 | 1decb42879a68f723d9200b33d45dccf6a9d5310 | f2304cc80c24d1580fc80fd06947050f2d36308d | /The-Mage/API/__init__.py | 931190dac68d6fa1f42a246efb345ae2158db046 | [
"Apache-2.0"
]
| permissive | https://github.com/EnderDas/The-Mage | 47d3f6af3198bdda057e7581dd0d303b0f5f6463 | 58699b79e618212ed99d8b3c3c12f86a446d1cc8 | refs/heads/master | 2021-09-15T03:20:58.619645 | 2018-05-25T03:45:34 | 2018-05-25T03:45:34 | 111,168,431 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #__init__.py
from .armor import *
from .inventory import *
from .item import *
from .player import *
from .skill import *
from .weapon import *
| UTF-8 | Python | false | false | 144 | py | 11 | __init__.py | 10 | 0.701389 | 0.701389 | 0 | 7 | 19.571429 | 24 |
lucasolmostito/empresa-zapatos-bernini | 3,496,103,398,975 | dbd97f6f0d69e46e5a553f28ecbdd987359c6ffd | 2b2dc71f73bb553e83eb51907565186c5ecf6e8d | /zapatos_bernini/applications/orders/migrations/0001_initial.py | 350abe435a60ee80b3bcca38733dccd355ea5482 | []
| no_license | https://github.com/lucasolmostito/empresa-zapatos-bernini | 47be36d7aaad73bc3a9aa5cd97bdc4ac54bb353d | 5624c51b843f960a2fd24400eb6da137900fa338 | refs/heads/master | 2023-06-19T05:48:21.439339 | 2021-07-07T20:01:21 | 2021-07-07T20:01:21 | 383,903,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.5 on 2021-07-07 19:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('items', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('date_sale', models.DateTimeField()),
('total_quantity', models.PositiveIntegerField()),
('total_amount', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Monto total')),
('canceled', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Pedido',
'verbose_name_plural': 'Pedidos',
},
),
migrations.CreateModel(
name='OrderDetail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('quantity', models.PositiveIntegerField(verbose_name='Cantidad de un producto')),
('price', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Precio')),
('canceled', models.BooleanField(default=False)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.item')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.order')),
],
options={
'verbose_name': 'Detalle de pedido',
'verbose_name_plural': 'Detalles de pedido',
},
),
]
| UTF-8 | Python | false | false | 2,077 | py | 24 | 0001_initial.py | 21 | 0.56235 | 0.551276 | 0 | 49 | 41.387755 | 117 |
justalemon/LimeBot | 3,934,190,068,706 | e1123a345bf2e9f048894598fdb7687598533cbc | bbf10736c2d5339ee137c391e5077222aceca594 | /superlime/bot.py | 627d6dbdd9c11f5480958436e27763079a408991 | [
"MIT"
]
| permissive | https://github.com/justalemon/LimeBot | 62da1432d15164949d4f1b2c894cce356da87e2c | 89153007be0a94038b430a1f4605d66731922e29 | HEAD | 2019-03-17T21:22:47.177860 | 2018-05-05T16:25:20 | 2018-05-05T16:25:20 | 123,289,986 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import importlib
import inspect
import logging
from .commands import Native
from discord.ext import commands
logger = logging.getLogger(__name__)
class SuperLime(commands.AutoShardedBot):
def __init__(self, *args, **kwargs):
"""SuperLime's Bot class."""
logger.debug("Main class initialization")
# Run the Bot as usual with the same arguments
super().__init__(*args, **kwargs)
        # Placeholder to store the config later
self.config = None
# Add the cog with the custom SuperLime's commands
self.add_cog(Native(self))
def load_extension(self, name):
"""Loads an extension from a file or Python Library."""
logger.info("Loading cogs from {0}".format(name))
lib = importlib.import_module(name)
        # Use a separate loop variable so the `name` argument (the module name) is not shadowed
        for member_name, obj in inspect.getmembers(lib, inspect.isclass):
            if len(inspect.getfullargspec(obj).args) == 2:  # ["self", "bot"]
                if inspect.getfullargspec(obj).args[1] == "bot":
                    self.add_cog(obj(self))
        self.extensions[name] = lib
| UTF-8 | Python | false | false | 1,074 | py | 8 | bot.py | 5 | 0.621043 | 0.61825 | 0 | 34 | 30.588235 | 77 |
evbeda/suppliers_management | 1,735,166,803,207 | 3a83ed611aaf0d23df4f66ac86a902ac0b1e7918 | 58f63776b6e150fdd6dd2404f3f5d73b19d3bcb0 | /supplier_app/filters.py | d650565646224c6b50e165e784063fd22de5d65e | []
| no_license | https://github.com/evbeda/suppliers_management | 9360a2ce70f67aa4994bcc26d9da91252a03cecb | ce3966ba13a292f62904579b7bf5947ddd540d14 | refs/heads/master | 2022-12-14T05:08:22.408725 | 2020-07-24T13:41:32 | 2020-07-24T13:50:06 | 206,798,693 | 4 | 1 | null | false | 2022-12-08T06:07:54 | 2019-09-06T13:22:30 | 2022-10-29T07:14:56 | 2022-12-08T06:07:52 | 2,124 | 2 | 0 | 17 | Python | false | false | from django.forms import CheckboxSelectMultiple, Select
from django_filters import (
CharFilter,
DateFromToRangeFilter,
FilterSet,
MultipleChoiceFilter
)
from django.utils.translation import ugettext_lazy as _
from supplier_app.constants.taxpayer_status import get_taxpayer_status_choices
from supplier_app.models import TaxPayer
from utils.custom_filters import DateRangeWidget
class TaxPayerFilter(FilterSet):
taxpayer_state = MultipleChoiceFilter(
choices=get_taxpayer_status_choices,
widget=CheckboxSelectMultiple(attrs={'class': 'list-unstyled'}),
label=_("Organization state")
)
taxpayer_date = DateFromToRangeFilter()
country = CharFilter(
widget=Select(
            choices=[('AR', 'Argentina'), ('BR', 'Brasil')],
attrs={'class': 'custom-select'}
),
label=_("Country")
)
class Meta:
model = TaxPayer
fields = (
'taxpayer_state',
'taxpayer_date',
'country'
)
def get_form_class(self):
form = super(FilterSet, self).get_form_class()
form.base_fields['taxpayer_date'].widget = DateRangeWidget()
return form
| UTF-8 | Python | false | false | 1,207 | py | 149 | filters.py | 105 | 0.64623 | 0.64623 | 0 | 42 | 27.738095 | 78 |
csu7617/StudyPython | 6,811,818,146,070 | c1bc3240393a5443a266e79071a2969697e2021b | 616463bd0161f4e4db882ab68e40b6ef61816225 | /MLProject/data/Instructions/ML05_RF_2花费时间说明.py | fc6e4b9b5833ac4e49222e122470cddcd46fbb83 | []
| no_license | https://github.com/csu7617/StudyPython | 9dee358865a10ba7814fcc34846bcad6af3e5319 | 0a70b86080e8344d90343186fb39f71e6c643d5c | refs/heads/master | 2020-03-19T07:30:13.825925 | 2018-06-26T05:45:49 | 2018-06-26T05:45:49 | 136,024,516 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on May 22, 2018
@author: hw
'''
'''
Predict forest cover type with a random forest.
'''
from sklearn.model_selection import GridSearchCV  # GridSearchCV lives in model_selection (sklearn.grid_search is deprecated)
import datetime
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
import numpy as np
'''
------------ Random forest prediction accuracy ----------
Test-set accuracy: 0.84
Training-set accuracy: 0.84 (+/- 0.00)
Time spent: 366.87
'''
# Solve the problem the default way (fixed hyper-parameters, no grid search)
def RF_PredictForestAreas_2(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    rfc = RandomForestClassifier(n_estimators=50,min_samples_leaf=50,criterion='gini',bootstrap=True,random_state=8)
    rfc.fit(X_train,y_train)
    predict = rfc.predict(X_test)
    print('------------ Random forest prediction accuracy ----------')
    print('Test-set accuracy: %0.2f'%np.mean(predict==y_test.reshape(-1)))
    scores = cross_val_score(rfc,X_train,y_train,scoring='accuracy',cv=10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime-starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
# Let grid search pick the best parameter combination
'''
------ Random forest prediction accuracy --------
Test-set accuracy: 0.84
Training-set accuracy: 0.84 (+/- 0.00)
Time spent: 1717.53
'''
def RF_PredictForestAreas_1_1(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    # n_estimators: the number of trees in the forest
    rfc = RandomForestClassifier(criterion='gini',bootstrap=True,random_state=8)
    # Parameter names and candidate values to search over
    tuned_parameter = [{'min_samples_leaf': [50], 'n_estimators':[50]}]
    # Grid search with cross-validation
    clf = GridSearchCV(estimator=rfc,param_grid=tuned_parameter,cv = 5,n_jobs=1)
    # Fit the training set
    clf.fit(X_train,y_train)
    predict = clf.predict(X_test)
    print('------ Random forest prediction accuracy --------')
    print('Test-set accuracy: %0.2f'%np.mean(predict == y_test.reshape(-1)))
    scores = cross_val_score(clf,X_train,y_train,scoring='accuracy',cv = 10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime - starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
'''
------ Random forest prediction accuracy --------
Test-set accuracy: 0.93
Training-set accuracy: 0.93 (+/- 0.00)
Time spent: 3965.48
'''
# Let grid search pick the best parameter combination
def RF_PredictForestAreas_1_2(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    # n_estimators: the number of trees in the forest
    rfc = RandomForestClassifier(criterion='gini',bootstrap=True,random_state=8)
    # Parameter names and candidate values to search over
    tuned_parameter = [{'min_samples_leaf': [50,5], 'n_estimators':[50]}]
    # Grid search with cross-validation
    clf = GridSearchCV(estimator=rfc,param_grid=tuned_parameter,cv = 5,n_jobs=1)
    # Fit the training set
    clf.fit(X_train,y_train)
    predict = clf.predict(X_test)
    print('------ Random forest prediction accuracy --------')
    print('Test-set accuracy: %0.2f'%np.mean(predict == y_test.reshape(-1)))
    scores = cross_val_score(clf,X_train,y_train,scoring='accuracy',cv = 10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime - starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
'''
------ Random forest prediction accuracy --------
Test-set accuracy: 0.95
Training-set accuracy: 0.94 (+/- 0.00)
Time spent: 7858.24
'''
# Let grid search pick the best parameter combination
def RF_PredictForestAreas_1_3(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    # n_estimators: the number of trees in the forest
    rfc = RandomForestClassifier(criterion='gini',bootstrap=True,random_state=8)
    # Parameter names and candidate values to search over
    tuned_parameter = [{'min_samples_leaf': [50,5,2,3], 'n_estimators':[50]}]
    # Grid search with cross-validation
    clf = GridSearchCV(estimator=rfc,param_grid=tuned_parameter,cv = 5,n_jobs=1)
    # Fit the training set
    clf.fit(X_train,y_train)
    predict = clf.predict(X_test)
    print('------ Random forest prediction accuracy --------')
    print('Test-set accuracy: %0.2f'%np.mean(predict == y_test.reshape(-1)))
    scores = cross_val_score(clf,X_train,y_train,scoring='accuracy',cv = 10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime - starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
'''
------ Random forest prediction accuracy --------
Test-set accuracy: 0.95
Training-set accuracy: 0.95 (+/- 0.00)
Time spent: 12406.39
'''
# Let grid search pick the best parameter combination
def RF_PredictForestAreas_1_4(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    # n_estimators: the number of trees in the forest
    rfc = RandomForestClassifier(criterion='gini',bootstrap=True,random_state=8)
    # Parameter names and candidate values to search over
    tuned_parameter = [{ 'n_estimators':[50,10,20,100]}]
    # Grid search with cross-validation
    clf = GridSearchCV(estimator=rfc,param_grid=tuned_parameter,cv = 5,n_jobs=1)
    # Fit the training set
    clf.fit(X_train,y_train)
    predict = clf.predict(X_test)
    print('------ Random forest prediction accuracy --------')
    print('Test-set accuracy: %0.2f'%np.mean(predict == y_test.reshape(-1)))
    scores = cross_val_score(clf,X_train,y_train,scoring='accuracy',cv = 10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime - starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
'''
------ Random forest prediction accuracy --------
Test-set accuracy: 0.95
Training-set accuracy: 0.94 (+/- 0.00)
Time spent: 3809.94
'''
# Let grid search pick the best parameter combination
def RF_PredictForestAreas_1_5(X_train,y_train,X_test,y_test):
starttime = datetime.datetime.now()
    # Train the model
    # n_estimators: the number of trees in the forest
    rfc = RandomForestClassifier(criterion='gini',bootstrap=True,random_state=8)
    # Parameter names and candidate values to search over
    tuned_parameter = [{'min_samples_leaf': [2], 'n_estimators':[50,10,20]}]
    # Grid search with cross-validation
    clf = GridSearchCV(estimator=rfc,param_grid=tuned_parameter,cv = 5,n_jobs=1)
    # Fit the training set
    clf.fit(X_train,y_train)
    predict = clf.predict(X_test)
    print('------ Random forest prediction accuracy --------')
    print('Test-set accuracy: %0.2f'%np.mean(predict == y_test.reshape(-1)))
    scores = cross_val_score(clf,X_train,y_train,scoring='accuracy',cv = 10)
    print('Training-set accuracy: %0.2f(+/- %0.2f)'%(scores.mean(),scores.std()*2))
    endtime = datetime.datetime.now()
    spendtime = endtime - starttime
    print('Time spent: %0.2f'%spendtime.total_seconds())
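# Added illustrative driver (not in the original module). A hedged sketch of how these
# benchmark functions could be invoked on the UCI forest cover-type data via
# sklearn.datasets.fetch_covtype; the train/test split parameters are assumptions.
def run_demo():
    from sklearn.datasets import fetch_covtype
    from sklearn.model_selection import train_test_split
    data = fetch_covtype()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.3, random_state=8)
    RF_PredictForestAreas_2(X_train, y_train, X_test, y_test)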
| UTF-8 | Python | false | false | 6,884 | py | 24 | ML05_RF_2花费时间说明.py | 23 | 0.636788 | 0.596718 | 0 | 187 | 29.962567 | 116 |
nacpacheco/python-data-structures | 5,145,370,833,679 | af04c21899af97c1b0999e98e75ab7ad93a90447 | f6ce89cc14e4e18824187cc6479328dc4d75576d | /algorithms/factorial_recursion.py | 44ec2771dcc3c6ded672d6d70eb57d313d9b48fb | []
| no_license | https://github.com/nacpacheco/python-data-structures | 242d5cb7921bd3fbf56c864ebab4698f3d2fa632 | ffcc3ac3668b3a0825c80fe1d501d9466f27b5ca | refs/heads/main | 2023-01-11T18:15:25.550036 | 2020-11-15T02:01:02 | 2020-11-15T02:01:02 | 312,938,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # O(N)
def factorial_recursive(number):
if number == 1:
return 1
else:
return number * factorial_recursive(number-1)
# O(N)
def factorial_iteractive(number):
factorial = 1
for num in range(number,1,-1):
factorial = num * factorial
return factorial
print(factorial_recursive(1))
print(factorial_iteractive(1)) | UTF-8 | Python | false | false | 355 | py | 20 | factorial_recursion.py | 19 | 0.653521 | 0.630986 | 0 | 17 | 19.941176 | 53 |
cifpfbmoll/ollivanders-in-a-docker-pau13-loop | 17,995,912,984,947 | b70264ba8e33779f98baef5297556ca8afdf5094 | 77ac696c71aea8942f280f4b181ecfcece2ce24a | /test/test_sql/test_repository/test_get_by_quality.py | 0e28fa695edd0546f50918e1ea245e0cb36dca63 | [
"MIT"
]
| permissive | https://github.com/cifpfbmoll/ollivanders-in-a-docker-pau13-loop | 78c6d6368b72a467da536db194f0da25c67b87b1 | 0f9f2589b567c57bd56a8a4161de3043d5639a8b | refs/heads/main | 2023-05-03T10:36:26.745077 | 2021-05-07T16:12:13 | 2021-05-07T16:12:13 | 362,498,409 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import pytest
from service.service_sql.service import Service
from repository.repository_sql.models.items import Items
@pytest.mark.db_test
@pytest.mark.db_filter_by_sell_in
def test_get_by_quality_db(session):
"""Test if through session we can get an item by its quality
Args:
        session (SQLAlchemy Session): the session object from the SQLAlchemy instance
"""
    # Add the item
add_item = Items("Conjured Mana Cake", 5, 8)
session.add(add_item)
session.commit()
    # Get the item by its quality; since only one item has this quality, the query returns the Conjured Mana Cake
get_conjured_by_quality = session.query(Items).filter(Items.quality == 8).first()
assert get_conjured_by_quality.name == "Conjured Mana Cake"
assert get_conjured_by_quality.sell_in == 5
assert get_conjured_by_quality.quality == 8
| UTF-8 | Python | false | false | 897 | py | 128 | test_get_by_quality.py | 54 | 0.716201 | 0.710615 | 0 | 29 | 29.862069 | 120 |
pacellyjcax/ProgrammingChallenge | 7,335,804,162,402 | 54868840161f94a7e9fedc214678b925e2f37691 | cca41f3b3c88f0c154526b0df483bdea9e447fc5 | /URI/1241 - Encaixa ou Não II.py | 5c70f846e16a5236599d0ccfe73dcbee3705f699 | []
| no_license | https://github.com/pacellyjcax/ProgrammingChallenge | 499a1e4d0a67dee2bd6a2ae5ad7d4f4aee91bcd5 | 5c62fd62a44d61852fe5a13d0e2cc025a17108db | refs/heads/master | 2021-01-10T06:04:05.508209 | 2015-11-06T04:18:30 | 2015-11-06T04:18:30 | 45,658,779 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def encaixa(l):
dif = len(l[0])-len(l[1])
if dif<0:
return False
if l[0][-len(l[1]):] == l[1]:
return True
return False
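# Added note: encaixa() reports whether the second word is a suffix of ("fits" onto) the
# first; the loop below reads n test cases and prints the judge's required Portuguese
# labels "encaixa" / "nao encaixa".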
n = int(raw_input())
res = []
for i in range(n):
e = raw_input().split()
if encaixa(e):
res.append("encaixa")
else:
res.append("nao encaixa")
for e in res:
print e
| UTF-8 | Python | false | false | 352 | py | 227 | 1241 - Encaixa ou Não II.py | 227 | 0.505682 | 0.488636 | 0 | 20 | 16.6 | 33 |
tbindi/hackerrank | 3,650,722,250,601 | e6a7cbe018d2c76977dd011f26c5bc51dac8baf3 | 319203fff0529393487a774e93254430f61f649c | /find_hackerrank.py | 94b374ec684bf1a64f92b0d9704f3f72fbfa2f9a | []
| no_license | https://github.com/tbindi/hackerrank | 96b431cfba966e669bf9d96f0d20027175ae3f9b | 2a9184bc9603cee9dfe57360f7791a2d438bb055 | refs/heads/master | 2016-09-01T18:22:22.300723 | 2015-01-14T11:06:03 | 2015-01-14T11:07:39 | 29,057,104 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
At HackerRank, we always want to find out how popular we are getting every day and have scraped conversations from popular sites. Each conversation fits in 1 line and there are N such conversations. Each conversation has at most 1 word that says hackerrank (all in lowercase). We would like you to help us figure out whether a conversation:
Starts with hackerrank
Ends with hackerrank
Starts and ends with hackerrank
I/P:
4
i love hackerrank
hackerrank is an awesome place for programmers
hackerrank
i think hackerrank is a great place to hangout
O/P:
2
1
0
-1
'''
def find_hackerank(inp):
check = 'hackerrank'
for i in inp:
if str(check) == i[0:10] and str(check) == i[-10:]:
print 0
elif str(check) == i[0:10]:
print 1
elif str(check) == i[-10:]:
print 2
else:
print -1
if __name__ == "__main__":
T = int(raw_input())
inp = []
while T != 0:
T -= 1
inp.append(raw_input())
find_hackerank(inp) | UTF-8 | Python | false | false | 1,030 | py | 34 | find_hackerrank.py | 31 | 0.629126 | 0.606796 | 0 | 41 | 24.146341 | 340 |
IDisater/DjangoWeb | 9,062,381,019,062 | 7722f48962b2b868b53bae61bae6f4b1c5e22488 | 8d9128bbb71b0ac3b09fbe375b2f768facc81936 | /testapp/migrations/0014_auto_20191206_1427.py | 2765c5865586ce2f76553db86dbfa06dc5affb2b | []
| no_license | https://github.com/IDisater/DjangoWeb | 6fbdca948fb75e0fe22c9abf7ac0a34fb991fe29 | 4e41fceed528f10e549295ca745899c6c6fbcfb9 | refs/heads/master | 2022-12-04T11:52:11.097313 | 2020-03-26T02:09:06 | 2020-03-26T02:09:06 | 249,904,054 | 0 | 0 | null | false | 2022-11-22T04:59:25 | 2020-03-25T06:31:16 | 2020-03-26T02:09:09 | 2022-11-22T04:59:22 | 26,058 | 0 | 0 | 4 | Python | false | false | # Generated by Django 2.2.6 on 2019-12-06 14:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('testapp', '0013_remove_information_cp_about'),
]
operations = [
migrations.RenameField(
model_name='information',
old_name='cp_name',
new_name='cp_title',
),
]
| UTF-8 | Python | false | false | 380 | py | 68 | 0014_auto_20191206_1427.py | 48 | 0.578947 | 0.528947 | 0 | 18 | 20.111111 | 56 |
kushkansara02/aquadrone2020 | 884,763,296,783 | 14a0c4fbc545f527392413d1e85e74ecdb00c4fb | 726333481b78ab22b67cb8ba0ddb12efd147aeab | /thruster_control/scripts/v28_thruster_connection_test.py | 1c7e5bd86c470ceff6d7c64123e08e96ef1cdad1 | []
| no_license | https://github.com/kushkansara02/aquadrone2020 | 41632f8f00f9141af4a179a1882ba5c4f41837f4 | 9df03c9bfdcbbd86f4e8273a00fb108b780fcc60 | refs/heads/dev | 2023-03-11T20:36:12.827933 | 2021-02-28T02:38:09 | 2021-02-28T02:38:09 | 342,344,117 | 0 | 0 | null | true | 2021-02-28T01:48:27 | 2021-02-25T18:41:18 | 2021-02-27T03:42:55 | 2021-02-28T01:48:26 | 994 | 0 | 0 | 0 | Python | false | false | #!/usr/bin/env python3
import rospy
import rospkg
import json
from thrust_computer.thruster_configurations import V28Configuration
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
if __name__ == "__main__":
# rospy.init_node('sim_thruster_controller')
config = V28Configuration()
pwm_frequency = 400
rospack = rospkg.RosPack()
with open(rospack.get_path('thruster_control') + "/config/thruster_gpio.json") as gpio_json:
gpio_config = json.load(gpio_json)
thrusters = []
for i in range(config.get_num_thrusters()):
ThrusterClass = config.get_thruster_class(i)
thrusters.append(ThrusterClass(pwm_frequency, i, gpio_config[i]['gpio']))
def send_command(idx, T):
thrust = FloatStamped()
thrust.data = T
thrusters[idx].apply_thrust(thrust)
THRUST = 0.5
while not rospy.is_shutdown():
for i in range(config.get_num_thrusters()):
if rospy.is_shutdown():
break
print("Testing thruster %d" % i)
for th in range(0, 30):
send_command(i, th / 10.0)
rospy.sleep(0.1)
rospy.sleep(3.0)
send_command(i, 0)
rospy.sleep(3.0)
for th in range(0, 30):
send_command(i, -th/10.0)
rospy.sleep(0.1)
rospy.sleep(3.0)
send_command(i, 0)
rospy.sleep(3.0)
| UTF-8 | Python | false | false | 1,456 | py | 100 | v28_thruster_connection_test.py | 70 | 0.572115 | 0.54739 | 0 | 56 | 25 | 96 |
love3forever/django_hello | 7,516,192,797,402 | 411f132b2d5e93982788275b196fe1a0e840e100 | f49a0d83c73310a9d199970638c6fdc4cf89b2c0 | /function_test.py | f646de7d58c8898a11886bdfddd5ad5da9245922 | []
| no_license | https://github.com/love3forever/django_hello | bf3764fb4ce67ad104b78d2d3b81f6c9d9209f94 | 5e125b21f3864c54745952a940416f814031656a | refs/heads/master | 2021-08-15T00:04:01.887093 | 2017-11-17T02:20:41 | 2017-11-17T02:20:41 | 111,040,914 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-11-16 13:45:41
# @Author : Wangmengcn (eclipse_sv@163.com)
# @Link : https://eclipsesv.com
# @Version : $Id$
from selenium import webdriver
import unittest
class FirstPageTest(unittest.TestCase):
    """Test the initial page"""
def setUp(self):
self.browser = webdriver.Chrome()
def tearDown(self):
self.browser.quit()
def test_fp_function(self):
self.browser.get('http://localhost:8000')
self.assertIn('Django', self.browser.title)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 597 | py | 2 | function_test.py | 2 | 0.617094 | 0.579487 | 0 | 27 | 20.666667 | 51 |
JonasEc/historyproject2017 | 17,652,315,621,673 | 4eaefe97706bf9e5be0e3ff3cc6b36099e2bd7db | 6d8e7923c2a2c18a98a971ada87d63e32bdddf49 | /analyse/MergeSurreyTypes.py | c5600579d4396a277414f29ce7687e0989385fab | []
| no_license | https://github.com/JonasEc/historyproject2017 | a41242641d5631b41ea8b90ff3f861d50bfe8f0f | 6254f6d7066442ae042c4c461c16c2cc550d6ee8 | refs/heads/master | 2021-01-21T20:29:22.555395 | 2017-08-30T01:36:00 | 2017-08-30T01:36:00 | 92,239,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#############################################
# Jonas Mueller Gastell & Melanie Wallskog
# History Project
#############################################
# Create Battalion Types SC
# 2017 07 27
#########################
# "administrative stuff"
import pandas as pd
from os import chdir
#####################
# Data Storage, Output and Input
# select correct directory
directory = '/home/jonasmg/Prog/scrapinghistory/'
chdir(directory)
# what is our input?
inputfileSC = 'data/SCBaseSample2.csv'
listfile = 'data/SCBattsTypes.csv'
#and output?
outputfile = 'data/SCBaseSampleMerged.csv'
# Read in the data
dfSurreyTypes = pd.read_csv(listfile,sep =',')
dfSurreyTypes = dfSurreyTypes.loc[(dfSurreyTypes["contains_pals"] == 0) & ( dfSurreyTypes["nolocal_pals"] == 0) ]
dfSurreyTypes = dfSurreyTypes.drop(["x", "contains_pals", "nolocal_pals"], 1)
dfSurreyPeople = pd.read_csv(inputfileSC, sep=',')
dfSurreyPeople = dfSurreyPeople.drop(["Unnamed: 0"], 1)
dfSurreyPeople = dfSurreyPeople.rename(index=str, columns={"Regiment": "regiment"})
dfMerged = pd.merge(dfSurreyPeople, dfSurreyTypes, how= "inner" , on= "regiment")
dfMerged.to_csv(outputfile,sep=',', na_rep='', float_format=None, header=True,encoding='utf-8')
| UTF-8 | Python | false | false | 1,279 | py | 52 | MergeSurreyTypes.py | 47 | 0.644253 | 0.630962 | 0 | 48 | 25.625 | 113 |
League-Advisor/league-advisor | 2,345,052,153,786 | 9b6a3e60f0cdf4575d898aa15a77c87235d29e27 | 02f9eb2d16a6c8f58f0f242d1fbf66cf38f3c95f | /tests/test_sim_league_advisor.py | af72552a5ac19df70b7db03fc57af1983469af5a | [
"MIT"
]
| permissive | https://github.com/League-Advisor/league-advisor | 39de50f3e31381672f9d7ad35bff3c99fb39c9f2 | b77895833075ff13b075875eff421ec9fef9770e | refs/heads/main | 2023-09-05T02:44:12.866773 | 2021-11-11T09:32:35 | 2021-11-11T09:32:35 | 423,940,194 | 0 | 0 | MIT | false | 2021-11-12T11:10:08 | 2021-11-02T17:35:42 | 2021-11-11T09:32:39 | 2021-11-11T09:32:36 | 11,528 | 0 | 0 | 4 | Python | false | false | """This module tests LeagueAdvisor class methods"""
from league_advisor.league_advisor import LeagueAdvisor
from tests.flo import diff
def test_input_handler_help_user_color():
league_advisor = LeagueAdvisor()
diffs = diff(league_advisor.run_program, path="tests/simulations/help_color.sim.txt")
assert not diffs, diffs
def test_input_handler_solo_color():
league_advisor = LeagueAdvisor()
diffs = diff(league_advisor.run_program, path="tests/simulations/solo_champion_color.sim.txt")
assert not diffs, diffs
| UTF-8 | Python | false | false | 531 | py | 52 | test_sim_league_advisor.py | 27 | 0.758945 | 0.758945 | 0 | 14 | 36.857143 | 98 |
LSmyrnaios/DocUrlsRetriever | 18,846,316,512,797 | 75355149cbcb67696e18111c23113586bc26ae35 | b0fd5224a716fd95a1742640350673ab45d5d2fd | /logsAnalyzer.py | 07011b65354e90e76af4fd0b1eb7c2fe07a6c231 | [
"Apache-2.0"
]
| permissive | https://github.com/LSmyrnaios/DocUrlsRetriever | e04d3c730e9616b9c280bb981dfc1a6af57fb766 | 765e66cf2def97bfa1989024549ea5e23716fbb9 | refs/heads/master | 2021-07-10T13:04:52.676204 | 2021-06-27T18:14:29 | 2021-06-27T18:14:29 | 121,557,817 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This program analyzes the logfiles and extracts useful info :-)
It no longer works since the main program moved to multi-thread mode.
TODO - Update this program to handle multi-threaded logs.
Some thoughts:
Loop through the logs, detect which thread each log line refers to, and write it to a separate file.
Keep the names of those files in a list, then go through the files and analyze each one (each thread-file separately).
It is more appropriate to create one file per thread than to keep the logs for each thread in memory in a multimap:
there is more storage than memory... (An illustrative sketch of this splitting idea follows the imports below.)
@author Lampros Smyrnaios
"""
import datetime
import os
import re
import sys
import zipfile
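# --- Added illustrative sketch (not part of the original script) ---
# The module docstring above proposes splitting a multi-threaded log into one file per
# thread before analyzing each file separately. A minimal, hedged sketch of that idea,
# assuming each line carries its thread name in square brackets, e.g.
# "2019-01-01 10:00:00.123 [Thread-3] INFO ..."; the real log layout may differ.
def split_log_per_thread(input_log_path, output_directory):
    thread_pattern = re.compile(r"\[([^\]]+)\]")
    per_thread_files = {}
    try:
        with open(input_log_path, 'r') as merged_log:
            for line in merged_log:
                match = thread_pattern.search(line)
                thread_name = match.group(1) if match else "unknown-thread"
                if thread_name not in per_thread_files:
                    per_thread_files[thread_name] = open(
                        os.path.join(output_directory, thread_name + ".log"), 'w')
                per_thread_files[thread_name].write(line)
    finally:
        for thread_file in per_thread_files.values():
            thread_file.close()
    return sorted(per_thread_files.keys())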
log_files_directory = None
file_no = 0
MAX_TIME_DIFFERENCE = 30 # 30 seconds
pre_date_time = None
errors_found = 0
specific_string_counter = 0
string_to_be_found = "The maximum limit ("
def get_time_object(line):
try:
date_time_string = re.search("(.*)\\s(?:INFO|DEBUG|WARN|ERROR).*", line).group(1)
except:
date_time_string = None
if date_time_string is None or len(date_time_string) == 0:
print("There was a problem when retrieving the time-string from the line:\n" + line, file=sys.stderr)
return None
try:
date_time_string += "000" # Add 3 zeros to simulate the "microseconds" at the end, since there's no support for milliseconds :-P
# print("date_time_string: " + date_time_string) # DEBUG!
time_object = datetime.datetime.strptime(date_time_string, "%Y-%m-%d %H:%M:%S.%f")
except:
return None
return time_object
def analyze_time_difference(current_line, previous_line, log_file):
global pre_date_time
current_date_time = get_time_object(current_line)
if current_date_time is None:
# print("No date_time_string for line: " + current_line) # DEBUG!
return
# Take the date and time of the current line
if pre_date_time is None:
pre_date_time = current_date_time
return
time_difference = abs(current_date_time - pre_date_time)
# print("time_difference: " + str(time_difference))
if time_difference > datetime.timedelta(seconds=MAX_TIME_DIFFERENCE):
print("Large time difference (" + str(time_difference) + " > " + str(MAX_TIME_DIFFERENCE) + " seconds) for the following lines of logging_file: " + log_file)
print(previous_line + current_line)
pre_date_time = current_date_time
def check_contained_strings(current_line):
global errors_found, specific_string_counter, string_to_be_found
if "ERROR" in current_line:
print("A line with an \"ERROR\" found:\n" + current_line)
errors_found += 1
elif string_to_be_found in current_line:
# print("A line containing the string \"" + string_to_be_found + "\" found:\n" + current_line) # DEBUG!
specific_string_counter += 1
def load_and_check_log_files():
# Open directory and show the files inside:
print("Opening log_directory: " + log_files_directory)
for file in os.scandir(log_files_directory):
file_name = file.name
full_file_path = os.path.join(log_files_directory, file_name)
if file_name.endswith(".zip"):
with zipfile.ZipFile(full_file_path, 'r') as zip_ref:
file_list_size = len(zip_ref.filelist)
if file_list_size != 1:
print("The zip-file \"" + file_name + "\" contained more than one files (" + str(file_list_size) + ")!")
continue
file_name = zip_ref.filelist[0].filename
print("File name inside zip: " + file_name)
zip_ref.extractall(log_files_directory) # Just extract in the logs directory.
full_file_path = os.path.join(log_files_directory, file_name)
if not file_name.endswith(".log"):
print("Found a non \"LOG-file\": " + file_name)
print("Opening log_file: " + str(file_name))
previous_line = ""
with open(full_file_path, 'r+') as log_file:
for current_line in log_file:
if len(current_line) == 0 or current_line == "\n":
continue
# print("Current_line: " + current_line) # DEBUG
analyze_time_difference(current_line, previous_line, log_file.name)
check_contained_strings(current_line)
# Other metrics here
previous_line = current_line # Before moving to the next line
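# --- Hypothetical sketch, not yet wired into the flow above ---
# The module docstring proposes splitting the multi-threaded logs into one file per
# thread and then analyzing each thread-file separately. The function below is a
# minimal sketch of that idea; the "[ThreadName]" marker pattern and the name
# split_log_by_thread are assumptions, since the real multi-threaded log layout is
# not shown in this file.
def split_log_by_thread(log_file_path, output_directory):
    thread_files = {}  # thread-name -> open file handle
    thread_name_regex = re.compile(r"\[([^\]]+)\]")  # assumed thread-marker format
    with open(log_file_path, 'r') as log_file:
        for line in log_file:
            match = thread_name_regex.search(line)
            thread_name = match.group(1) if match else "unknown_thread"
            if thread_name not in thread_files:
                per_thread_path = os.path.join(output_directory, thread_name + ".log")
                thread_files[thread_name] = open(per_thread_path, 'w')
            thread_files[thread_name].write(line)
    for file_handle in thread_files.values():
        file_handle.close()
    return list(thread_files.keys())  # per-thread file names, to be analyzed one-by-one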
if __name__ == "__main__":
args = sys.argv
num_args = len(args)
if num_args != 2: # The first arg is the program's name and the second is the "log_files_directory".
print("Invalid number of argument given: " + str(num_args-1) + "\nExpected to get the \"log_files_directory\".", file=sys.stderr)
exit(1)
log_files_directory = args[1]
if not os.path.isdir(log_files_directory):
print("The following directory does not exists: " + log_files_directory + "\nRerun the program with a valid \"log_files_directory\"..", file=sys.stderr)
exit(2)
load_and_check_log_files()
print("Number of \"ERROR\"-instances found: " + str(errors_found))
print("Number of instances of \"" + string_to_be_found + "\" found: " + str(specific_string_counter))
exit(0)
| UTF-8 | Python | false | false | 5,453 | py | 48 | logsAnalyzer.py | 31 | 0.633963 | 0.629562 | 0 | 140 | 37.95 | 165 |
DISBi/django-disbi | 15,702,400,447,092 | 469c6d8493a28af5cb9891b650261dd3b539bd55 | 897b62f973175c7108790e7df023ea4d391bffce | /disbi/_import_export/exceptions.py | e129cf0118a2dbed167724d59ac0f2f16a5d3e6d | [
"BSD-2-Clause",
"MIT"
]
| permissive | https://github.com/DISBi/django-disbi | 72e652a582ec0290d9a34b6f26c5260c61ebfe91 | b63d9cb87daa3396d7bbca20268e89bf91cb0395 | refs/heads/master | 2021-01-16T23:04:42.315810 | 2019-07-17T07:02:29 | 2019-07-17T07:02:29 | 68,965,297 | 3 | 0 | MIT | false | 2018-11-13T20:49:01 | 2016-09-22T21:54:15 | 2018-08-28T19:30:39 | 2018-11-13T20:49:00 | 1,687 | 3 | 0 | 0 | Python | false | null | from __future__ import unicode_literals
class ImportExportError(Exception):
"""A generic exception for all others to extend."""
pass
class FieldError(ImportExportError):
"""Raised when a field encounters an error."""
pass
| UTF-8 | Python | false | false | 242 | py | 85 | exceptions.py | 41 | 0.710744 | 0.710744 | 0 | 11 | 21 | 55 |
luoxuwei/Compiler | 2,765,958,985,279 | 515ad1fb16f1881993ca07606b9e51cdc9e8c8df | 7588f030cb6f5c2692c14ec3caedf9f11a312cb1 | /pythonvm/test_case/30_test_class_method.py | b8d495e7fce033d69404419a3ff16851991a1267 | []
| no_license | https://github.com/luoxuwei/Compiler | cceee6af426ba9d9b2f13d22fcf6e06a692953ba | d11da97ab9ef2f1a61d0244d0080c6b3f90fd475 | refs/heads/master | 2022-12-12T12:20:27.988873 | 2022-11-26T16:29:00 | 2022-11-26T16:29:00 | 252,653,543 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | class A:
def say(self):
print(self)
print("hello")
a = A()
a.say()
| UTF-8 | Python | false | false | 88 | py | 271 | 30_test_class_method.py | 217 | 0.465909 | 0.465909 | 0 | 7 | 11.571429 | 22 |
ivanmtoroc/xyz-bank | 7,464,653,203,407 | 92d80d7df2118321697d9a77632693fa9169e92a | 62071acf24445e59274721c1b4cf75a50c0d2a55 | /apps/users/views.py | 35c754b499d19e6e899b324402b24e14eb385b90 | [
"MIT"
]
| permissive | https://github.com/ivanmtoroc/xyz-bank | 4773e6aa6e5d6e9f65ee4205352cdb22a661d8b0 | 7d1a383ee745a01f19e63e3254af389acb1628d9 | refs/heads/master | 2018-12-07T17:53:02.327324 | 2018-11-07T01:22:14 | 2018-11-07T01:22:14 | 148,047,855 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import LoginForm
def login_view(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username = username, password = password)
if user:
login(request, user)
return redirect('users:users')
context = { 'form': LoginForm() }
return render(request, 'users/login.html', context)
@login_required
def logout_view(request):
logout(request)
return redirect('users:login')
| UTF-8 | Python | false | false | 705 | py | 20 | views.py | 9 | 0.689362 | 0.689362 | 0 | 21 | 32.571429 | 78 |
RoboLoCo-5338/website | 6,880,537,630,958 | 7815f76dc698d4960be772bc6cfa25727c34f688 | 039dc473a25d22ae982867b53b39987541ff709a | /mysite/settings.py | f6934700c2075774ff223c7e332346e0a1e3b499 | [
"Apache-2.0"
]
| permissive | https://github.com/RoboLoCo-5338/website | 3d970c16f736884a71c86a9cb5c41e7a44734d46 | 331ce231e40fb9296f3a1602c59c12b43f86b938 | refs/heads/master | 2023-02-18T05:37:30.549891 | 2022-08-05T20:29:01 | 2022-08-05T20:29:01 | 143,949,138 | 0 | 1 | Apache-2.0 | false | 2023-02-15T20:53:48 | 2018-08-08T02:13:55 | 2021-12-12T06:34:51 | 2023-02-15T20:53:43 | 4,852 | 0 | 2 | 18 | Python | false | false | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", "INSECURE")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'False').lower() == 'true'
HEROKU_RELEASE_VERSION = os.environ.get("HEROKU_RELEASE_VERSION", "0")
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", "")
HEROKU_SLUG_COMMIT = os.environ.get("HEROKU_SLUG_COMMIT", "")
# Cache version - Use HEROKU_RELEASE_VERSION (eg v123)
VERSION = int(HEROKU_RELEASE_VERSION.replace('v', ''))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'scout.apps.ScoutConfig',
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'markdownify',
'nested_admin',
'django_celery_beat',
'simple_history',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'blog.ctx.heroku_info',
'blog.ctx.nav',
'blog.ctx.mains',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ACCOUNT_LOGOUT_REDIRECT_URL = "/blog"
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Markdownify https://django-markdownify.readthedocs.io/en/latest/settings.html
MARKDOWNIFY_BLEACH = False
MARKDOWNIFY_MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code',
'markdown.extensions.extra', ]
# HTTPS instead of HTTP
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_PRELOAD = True
CSRF_USE_SESSIONS = True
# S3 file storage
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_AUTO_CREATE_BUCKET = True
AWS_BUCKET_ACL = "private"
AWS_DEFAULT_ACL = "private"
AWS_STORAGE_BUCKET_NAME = os.environ.get('BUCKETEER_BUCKET_NAME', 'insecure')
AWS_SECRET_ACCESS_KEY = os.environ.get('BUCKETEER_AWS_SECRET_ACCESS_KEY', 'insecure')
AWS_ACCESS_KEY_ID = os.environ.get('BUCKETEER_AWS_ACCESS_KEY_ID', 'insecure')
AWS_S3_USE_SSL = True
BASE_REDIS_URL_DEFAULT = 'redis://localhost'
BASE_REDIS_URL = os.environ.get('REDIS_URL', BASE_REDIS_URL_DEFAULT)
REDIS_DJANGO_CACHE_URL = BASE_REDIS_URL + "/1"
REDIS_CELERY_TASKS_URL = BASE_REDIS_URL + "/2"
REDIS_CELERY_TOMBS_URL = BASE_REDIS_URL + "/3"
if BASE_REDIS_URL != BASE_REDIS_URL_DEFAULT:
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': REDIS_DJANGO_CACHE_URL,
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
},
},
}
CELERY_BROKER_URL = REDIS_CELERY_TASKS_URL
CELERY_ACCEPT_CONTENT = ['json', ]
CELERY_RESULT_BACKEND = REDIS_CELERY_TOMBS_URL
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ENABLE_UTC = True
TBA_AUTH_KEY = os.environ.get('TBA_AUTH_KEY', None)
# Activate Django-Heroku.
django_heroku.settings(locals())
| UTF-8 | Python | false | false | 6,622 | py | 68 | settings.py | 47 | 0.690124 | 0.682271 | 0 | 214 | 29.943925 | 91 |
ketaki-thatte/algorithms_n_data_Structure | 10,299,331,602,932 | 946dae3904fa7493afc98a2b5e40f26baacb1e32 | 144895f1cd2ca059a6dab03d051f36ae96458f06 | /Searching_n_Sorting/binary_search.py | a35ed8fcd7eaa783d830e3094f032ceda0f3da0c | []
| no_license | https://github.com/ketaki-thatte/algorithms_n_data_Structure | 187568ce93933373bdb2983b0c201ca53aa3b19e | 5bb432eb8729ac983422391f36ecc4ebced1515a | refs/heads/master | 2020-03-26T02:34:51.490675 | 2018-12-11T15:48:53 | 2018-12-11T15:48:53 | 144,416,437 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def binarySearch(arr,num,start,end):
mid = (start + end) // 2
if start <= end:
if arr[mid] > num:
return binarySearch(arr,num,start,mid-1)
elif arr[mid] < num:
return binarySearch(arr,num,mid+1,end)
elif arr[mid] == num:
return mid
else:
return -1
if __name__ == '__main__':
arr = [1,3,4,6,8,19,28,33]
result = binarySearch(arr,28,0,len(arr)-1)
if result == -1:
print("Element does not exist")
else:
print("Element present at position : " +str(result)) | UTF-8 | Python | false | false | 484 | py | 12 | binary_search.py | 12 | 0.623967 | 0.582645 | 0 | 20 | 23.25 | 54 |
Imran2923/FYP-IT-audit-project | 18,829,136,651,469 | 54ae99626739603ff7ceb2811798f81e1527489e | 8b9700c75aef28a659e6089ce0c7142236051dd1 | /Windows_GUI.py | 05a820b90090bb7aa00ebfb48f90cf048b2eef8b | []
| no_license | https://github.com/Imran2923/FYP-IT-audit-project | a472c0cf5ee2de7b6b5d139645d421dcb358da10 | 8afed610a513943a70bf2125eaa7f22abf20913b | refs/heads/main | 2023-06-28T17:44:55.074855 | 2021-07-30T07:30:08 | 2021-07-30T07:30:08 | 386,152,471 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter.ttk import *
import pyfiglet
import fileinput
import time
from parse import *
import winrm
#module for patch scan
import os
import subprocess as sp
#module for port scan
import sys
import socket
from datetime import datetime
import shutil
from tkinter import *
# ==== Vars ====
basicRes = []
log = []
target = 'localhost'
def basic():
host = IP.get()
domain = Domain.get()
user = username.get()
password = passwd.get()
session = winrm.Session(host, auth=('{}@{}' .format(user ,domain), password), transport='ntlm')
import time
import configparser
config = configparser.ConfigParser()
    timestamp = time.strftime("%Y_%m_%d-%I_%M_%S_%p")
    timestr = timestamp + " Basic Windows Settings.ini"
def complexity():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object ComplexityEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['complexity'] = p_dict
def maxpassage():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MaxPasswordAge')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['maxpage'] = p_dict
def minpassage():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MinPasswordAge')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['minpage'] = p_dict
def minplength():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MinPasswordLength')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['minplength'] = p_dict
def phistorycount():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object PasswordHistoryCount')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['phistorycount'] = p_dict
def reverseencrypt():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object ReversibleEncryptionEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['reverseencrypt'] = p_dict
def lockoutduration():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutDuration')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockouttime'] = p_dict
def lockoutobserve():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutObservationWindow')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockoutobservetime'] = p_dict
def lockoutcount():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutThreshold')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockoutthreshold'] = p_dict
def limitpass():
p = session.run_ps('Get-ItemProperty -Path HKLM:\SYSTEM\CurrentControlSet\Control\Lsa -Name "LimitBlankPasswordUse" | Select-Object LimitBlankPasswordUse')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['limitpass'] = p_dict
def crashonaudit():
p = session.run_ps('Get-ItemProperty -Path HKLM:\SYSTEM\CurrentControlSet\Control\Lsa -Name "CrashOnAuditFail" | Select-Object CrashOnAuditFail')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['crashonaudit'] = p_dict
def disablecad():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "DisableCAD" | Select-Object DisableCAD')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disablecad'] = p_dict
def nousername():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "DontDisplayLastUserName" | Select-Object DontDisplayLastUserName')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['nousername'] = p_dict
def legaltext():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "LegalNoticeText" | Select-Object LegalNoticeText')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("\x00", "")
p_output = p_output.replace("---------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['legaltext'] = p_dict
def legalcaption():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "LegalNoticeCaption" | Select-Object LegalNoticeCaption')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['legalcaption'] = p_dict
def securitysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "RequireSecuritySignature" | Select-Object RequireSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['securitysig'] = p_dict
def enablesecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "EnableSecuritySignature" | Select-Object EnableSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablesecuritysig'] = p_dict
def enableplainpass():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "EnablePlainTextPassword" | Select-Object EnablePlainTextPassword')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablesplainpass'] = p_dict
def serverautodisconnect():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "AutoDisconnect" | Select-Object AutoDisconnect')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverautodisconnect'] = p_dict
def serversecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "RequireSecuritySignature" | Select-Object RequireSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serversecuritysig'] = p_dict
def serverenablesecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "EnableSecuritySignature" | Select-Object EnableSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverenablesecuritysig'] = p_dict
def serverenableforcelogoff():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "enableforcedlogoff" | Select-Object enableforcedlogoff')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverenableforcelogoff'] = p_dict
complexity()
maxpassage()
minpassage()
minplength()
phistorycount()
reverseencrypt()
lockoutduration()
lockoutobserve()
lockoutcount()
limitpass()
crashonaudit()
disablecad()
nousername()
legaltext()
legalcaption()
securitysig()
enablesecuritysig()
enableplainpass()
serverautodisconnect()
serversecuritysig()
serverenablesecuritysig()
serverenableforcelogoff()
with open(timestr,'w') as configfile:
config.write(configfile)
config.read(timestr)
count = 0
count2 = 0
print("\n")
print("==============================================================")
print("\n")
print("Windows Controls \n")
if(config['complexity']['ComplexityEnabled'] == "True"):
Stat1 = "No need to change Control: ComplexityEnabled \n"
count = count + 1
else:
Stat1 = "Setting 'ComplexityEnabled' requires change: False to True \n"
count2 = count2 + 1
if(config['maxpage']['MaxPasswordAge'] == "42.00:00:00"):
Stat2 = "No need to change Control: Maximum Password Age \n"
count = count + 1
else:
        Stat2 = "Setting 'MaxPasswordAge' requires change: Set value to 42 days (42.00:00:00) or more \n"
count2 = count2 + 1
if(config['minpage']['MinPasswordAge'] == "1.00:00:00"):
Stat3 = "No need to change Control: Minimum Password Age \n"
count = count + 1
else:
        Stat3 = "Setting 'MinPasswordAge' requires change: Set value to 1 day (1.00:00:00) or more \n"
count2 = count2 + 1
if(int(config['minplength']['MinPasswordLength']) >= 14):
Stat4 = "No need to change Control: MinPasswordLength \n"
count = count + 1
else:
        Stat4 = "Setting 'MinPasswordLength' requires change: Set value to 14 or more \n"
count2 = count2 + 1
if(int(config['phistorycount']['PasswordHistoryCount']) >= 24):
Stat5 = "No need to change Control: PasswordHistoryCount \n"
count = count + 1
else:
        Stat5 = "Setting 'PasswordHistoryCount' requires change: Set value to 24 or more \n"
count2 = count2 + 1
if(config['reverseencrypt']['ReversibleEncryptionEnabled'] == "False"):
Stat6 = "No need to change Control: ReversibleEncryptionEnabled \n"
count = count + 1
else:
Stat6 = "Setting 'ReversibleEncryptionEnabled' requires change: True to False \n"
count2 = count2 + 1
if(config['lockouttime']['LockoutDuration'] == "00:30:00"):
Stat7 = "No need to change Control: LockoutDuration \n"
count = count + 1
else:
Stat7 = "Setting 'LockoutDuration' requires change: Set value to 15 or more minutes \n"
count2 = count2 + 1
if(config['lockoutobservetime']['LockoutObservationWindow'] == "00:30:00"):
Stat8 = "No need to change Control: LockoutObservationWindow \n"
count = count + 1
else:
Stat8 = "Setting 'LockoutObservationWindow' requires change: Set value to 15 or more minutes \n"
count2 = count2 + 1
if(int(config['lockoutthreshold']['LockoutThreshold']) <= 10 and int(config['lockoutthreshold']['LockoutThreshold']) != 0 ):
Stat9 = "No need to change Control: LockoutThreshold \n"
count = count + 1
else:
Stat9 = "Setting 'LockoutThreshold' requires change: Set value to 10 or fewer invalid logon attempts but not 0 \n"
count2 = count2 + 1
if(int(config['limitpass']['limitblankpassworduse']) == 1):
Stat10 = "No need to change Control: LimitBlankPasswordUse \n"
count = count + 1
else:
Stat10 = "Setting 'LimitBlankPasswordUse' requires change: Set value to 1 OR Enable in Accounts: Limit local account use of blank passwords to console logon only in GPO \n"
count2 = count2 + 1
if(int(config['crashonaudit']['crashonauditfail']) == 0):
Stat11 = "No need to change Control: CrashOnAuditFail \n"
count = count + 1
else:
Stat11 = "Setting 'CrashOnAuditFail' requires change: Set value to 0 OR Ensure in Audit: Shut down system immediately if unable to log security audits' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['disablecad']['disablecad']) == 0):
Stat12 = "No need to change Control: DisableCAD \n"
count = count + 1
else:
Stat12 = "Setting 'DisableCAD' requires change: Set value to 0 OR Ensure in 'Interactive logon: Do not require CTRL+ALT+DEL' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['nousername']['dontdisplaylastusername']) == 1):
Stat13 = "No need to change Control: DontDisplayLastUserName \n"
count = count + 1
else:
Stat13 = "Setting 'DontDisplayLastUserName' requires change: Set value to 1 OR Ensure in 'Interactive logon: Don't display last signed-in' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
    if(config['legaltext']['legalnoticetext'] != ""):
Stat14 = "No need to change Control: LegalNoticeText \n"
count = count + 1
else:
Stat14 = "Setting 'LegalNoticeText' requires change: Configure 'Interactive logon: Message text for users attempting to log on' in GPO \n"
count2 = count2 + 1
if(config['legalcaption']['legalnoticecaption'] != "" ):
Stat15 = "No need to change Control: LegalNoticeCaption \n"
count = count + 1
else:
Stat15 = "Setting 'LegalNoticeCaption' requires change: Configure 'Interactive logon: Message title for users attempting to log on' in GPO \n"
count2 = count2 + 1
if(int(config['securitysig']['requiresecuritysignature']) == 1 ):
Stat16 = "No need to change Control: RequireSecuritySignature \n"
count = count + 1
else:
Stat16 = "Setting 'RequireSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network client: Digitally sign communications (always)' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
if(int(config['enablesecuritysig']['enablesecuritysignature']) == 1 ):
Stat17 = "No need to change Control: EnableSecuritySignature \n"
count = count + 1
else:
Stat17 = "Setting 'EnableSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network client: Digitally sign communications (if server agrees)' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
if(int(config['enablesplainpass']['enableplaintextpassword']) == 0 ):
Stat18 = "No need to change Control: EnablePlainTextPassword \n"
count = count + 1
else:
Stat18 = "Setting 'EnablePlainTextPassword' requires change: Set value to 0 OR Ensure 'Microsoft network client: Send unencrypted password to third-party SMB servers' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['serverautodisconnect']['autodisconnect']) <= 15 ):
Stat19 = "No need to change Control: Server AutoDisconnect \n"
count = count + 1
else:
        Stat19 = "Setting 'Server AutoDisconnect' requires change: Set value to 15 or fewer OR Ensure 'Microsoft network server: Amount of idle time required before suspending session' is set to '15 or fewer minute(s)' in GPO \n"
count2 = count2 + 1
if(int(config['serversecuritysig']['requiresecuritysignature']) == 1 ):
Stat20 = "No need to change Control: Server RequireSecuritySignature \n"
count = count + 1
else:
Stat20 = "Setting 'Server RequireSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network server: Digitally sign communications (always)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['serverenablesecuritysig']['enablesecuritysignature']) == 1 ):
Stat21 = "No need to change Control: Server EnableSecuritySignature \n"
count = count + 1
else:
Stat21 = "Setting 'Server EnableSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network server: Digitally sign communications (if client agrees)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['serverenableforcelogoff']['enableforcedlogoff']) == 1 ):
Stat22 = "No need to change Control: Server enableforcedlogoff \n"
count = count + 1
else:
Stat22 = "Setting 'Server enableforcedlogoff' requires change: Set value to 1 OR Ensure 'Microsoft network server: Disconnect clients when logon hours expire' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
#print(config.sections())
print("\n")
print("============================================================== \n")
listbox.insert(0, "Writing to " + timestr + " in program folder.")
listbox.insert(1, " ")
listbox.insert(2, "Account Security + Remediations")
listbox.insert(3, " ")
listbox.insert(4, Stat1)
listbox.insert(4, Stat2)
listbox.insert(4, Stat3)
listbox.insert(4, Stat4)
listbox.insert(4, Stat5)
listbox.insert(4, Stat6)
listbox.insert(4, Stat7)
listbox.insert(4, Stat8)
listbox.insert(4, Stat9)
listbox.insert(4, Stat10)
listbox.insert(4, Stat11)
listbox.insert(4, Stat12)
listbox.insert(4, Stat13)
listbox.insert(4, Stat14)
listbox.insert(4, Stat15)
listbox.insert(4, Stat16)
listbox.insert(4, Stat17)
listbox.insert(4, Stat18)
listbox.insert(4, Stat19)
listbox.insert(4, Stat20)
listbox.insert(4, Stat21)
listbox.insert(4, Stat22)
listbox2.insert(0, "\nNumber of Compliant controls")
listbox2.insert(1, "--> " + str(count))
listbox2.insert(2, "Number of Non-Compliant controls")
listbox2.insert(3, "--> " + str(count2))
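# --- Hypothetical refactoring sketch (not used by the GUI flow above) ---
# Every per-setting helper above repeats the same steps: run one PowerShell query over
# the winrm session, strip the byte-string/newline noise, drop the dashed ruler and
# build a one-entry {setting-name: value} dict for configparser. A single helper such
# as the sketch below could replace that boilerplate; the name query_setting is an
# assumption and is not part of the original program.
def query_setting(session, ps_command):
    result = session.run_ps(ps_command)
    output = result.std_out.decode(errors="ignore")
    # Keep only the header and value tokens; skip blank lines and the all-dash ruler line.
    tokens = [line.strip() for line in output.splitlines()
              if line.strip() and set(line.strip()) != {"-"}]
    if len(tokens) >= 2:
        return {tokens[0]: tokens[1]}
    return {tokens[0]: ""} if tokens else {}
# Example use (hypothetical): config['complexity'] = query_setting(session,
#     'Get-ADDefaultDomainPasswordPolicy | Select-Object ComplexityEnabled')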
def startScan_Intermediate():
host = IP.get()
domain = Domain.get()
user = username.get()
password = passwd.get()
session = winrm.Session(host, auth=('{}@{}' .format(user ,domain), password), transport='ntlm')
import time
import configparser
config = configparser.ConfigParser()
    timestamp = time.strftime("%Y_%m_%d-%I_%M_%S_%p")
    timestr = timestamp + " Intermediate Windows Settings.ini"
def complexity():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object ComplexityEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['complexity'] = p_dict
def maxpassage():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MaxPasswordAge')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['maxpage'] = p_dict
def minpassage():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MinPasswordAge')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['minpage'] = p_dict
def minplength():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object MinPasswordLength')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['minplength'] = p_dict
def phistorycount():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object PasswordHistoryCount')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['phistorycount'] = p_dict
def reverseencrypt():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object ReversibleEncryptionEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['reverseencrypt'] = p_dict
def lockoutduration():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutDuration')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockouttime'] = p_dict
def lockoutobserve():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutObservationWindow')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockoutobservetime'] = p_dict
def lockoutcount():
p = session.run_ps('Get-ADDefaultDomainPasswordPolicy | Select-Object LockoutThreshold')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lockoutthreshold'] = p_dict
def limitpass():
p = session.run_ps('Get-ItemProperty -Path HKLM:\SYSTEM\CurrentControlSet\Control\Lsa -Name "LimitBlankPasswordUse" | Select-Object LimitBlankPasswordUse')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['limitpass'] = p_dict
def crashonaudit():
p = session.run_ps('Get-ItemProperty -Path HKLM:\SYSTEM\CurrentControlSet\Control\Lsa -Name "CrashOnAuditFail" | Select-Object CrashOnAuditFail')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['crashonaudit'] = p_dict
def disablecad():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "DisableCAD" | Select-Object DisableCAD')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disablecad'] = p_dict
def nousername():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "DontDisplayLastUserName" | Select-Object DontDisplayLastUserName')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['nousername'] = p_dict
def legaltext():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "LegalNoticeText" | Select-Object LegalNoticeText')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['legaltext'] = p_dict
def legalcaption():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "LegalNoticeCaption" | Select-Object LegalNoticeCaption')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['legalcaption'] = p_dict
def securitysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "RequireSecuritySignature" | Select-Object RequireSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['securitysig'] = p_dict
def enablesecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "EnableSecuritySignature" | Select-Object EnableSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablesecuritysig'] = p_dict
def enableplainpass():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters -Name "EnablePlainTextPassword" | Select-Object EnablePlainTextPassword')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablesplainpass'] = p_dict
def serverautodisconnect():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "AutoDisconnect" | Select-Object AutoDisconnect')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverautodisconnect'] = p_dict
def serversecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "RequireSecuritySignature" | Select-Object RequireSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serversecuritysig'] = p_dict
def serverenablesecuritysig():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "EnableSecuritySignature" | Select-Object EnableSecuritySignature')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverenablesecuritysig'] = p_dict
def serverenableforcelogoff():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "enableforcedlogoff" | Select-Object enableforcedlogoff')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['serverenableforcelogoff'] = p_dict
def screensaveractive():
p = session.run_ps('Get-Wmiobject win32_desktop | where name -match $env:USERNAME | Select-Object ScreenSaveActive')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['screensaveractive'] = p_dict
def screensaversecure():
p = session.run_ps('Get-Wmiobject win32_desktop | where name -match $env:USERNAME | Select-Object ScreenSaverIsSecure')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['screensaversecure'] = p_dict
def screensavertimeout():
p = session.run_ps('Get-Wmiobject win32_desktop | where name -match $env:USERNAME | Select-Object ScreenSaverTimeout')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['screensavertimeout'] = p_dict
def anonymousno():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Control\Lsa -Name "RestrictAnonymous" | Select-Object RestrictAnonymous')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['anonymousno'] = p_dict
def disabledcreds():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Control\Lsa -Name "DisableDomainCreds" | Select-Object DisableDomainCreds')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disabledcreds'] = p_dict
def includeanon():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Control\Lsa -Name "EveryoneIncludesAnonymous" | Select-Object EveryoneIncludesAnonymous')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['includeanon'] = p_dict
def restrictnull():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanManServer\Parameters -Name "RestrictNullSessAccess" | Select-Object RestrictNullSessAccess')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['restrictnull'] = p_dict
def forceguest():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Control\Lsa -Name "ForceGuest" | Select-Object ForceGuest')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['forceguest'] = p_dict
def nolmhash():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Control\Lsa -Name "NoLMHash" | Select-Object NoLMHash')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['nolmhash'] = p_dict
def ldapintergrity():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LDAP -Name "LDAPClientIntegrity" | Select-Object LDAPClientIntegrity')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ldapintergrity'] = p_dict
def behavioradmin():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "ConsentPromptBehaviorAdmin" | Select-Object ConsentPromptBehaviorAdmin')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['behavioradmin'] = p_dict
def behavioruser():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "ConsentPromptBehaviorUser" | Select-Object ConsentPromptBehaviorUser')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['behavioruser'] = p_dict
def installdetect():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "EnableInstallerDetection" | Select-Object EnableInstallerDetection')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['installdetect'] = p_dict
def enablesecureUIA():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "EnableSecureUIAPaths" | Select-Object EnableSecureUIAPaths')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablesecureUIA'] = p_dict
def enablelua():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "EnableLUA" | Select-Object EnableLUA')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablelua'] = p_dict
def promptsecure():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "PromptOnSecureDesktop" | Select-Object PromptOnSecureDesktop')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['promptsecure'] = p_dict
def enablevirtual():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "EnableVirtualization" | Select-Object EnableVirtualization')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['enablevirtual'] = p_dict
def combrowser():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\Browser -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['combrowser'] = p_dict
def mapsbroker():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\MapsBroker -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['mapsbroker'] = p_dict
def lfsvc():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\lfsvc -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lfsvc'] = p_dict
def shareaccess():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\SharedAccess -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['shareaccess'] = p_dict
def lltdsvc():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\lltdsvc -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['lltdsvc'] = p_dict
def msis():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\MSiSCSI -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['msis'] = p_dict
def sshd():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\sshd -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['sshd'] = p_dict
def wercplsupport():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\wercplsupport -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['wercplsupport'] = p_dict
def RasAuto():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\RasAuto -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RasAuto'] = p_dict
def SessionEnv():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\SessionEnv -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SessionEnv'] = p_dict
def TermService():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\TermService -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['TermService'] = p_dict
def UmRdpService():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\UmRdpService -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['UmRdpService'] = p_dict
def RpcLocator():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\RpcLocator -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RpcLocator'] = p_dict
def RemoteRegistry():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\RemoteRegistry -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RemoteRegistry'] = p_dict
def RemoteAccess():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\RemoteAccess -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RemoteAccess'] = p_dict
def LanmanServer():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\LanmanServer -Name "Start" | Select-Object Start')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['LanmanServer'] = p_dict
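# The next group reads the Netlogon secure-channel parameters
# (HKLM:SYSTEM\CurrentControlSet\Services\Netlogon\Parameters), which back the
# "Domain member:" policies: encrypting/signing secure channel data, machine
# account password changes and age, and requiring a strong session key.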
def sealsecure():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\Netlogon\Parameters -Name "SealSecureChannel" | Select-Object SealSecureChannel')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['sealsecure'] = p_dict
def signsecure():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\Netlogon\Parameters -Name "SignSecureChannel" | Select-Object SignSecureChannel')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['signsecure'] = p_dict
def disablepasschange():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\Netlogon\Parameters -Name "DisablePasswordChange" | Select-Object DisablePasswordChange')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disablepasschange'] = p_dict
def machinemaxpasswrdage():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\Netlogon\Parameters -Name "MaximumPasswordAge" | Select-Object MaximumPasswordAge')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['machinemaxpasswrdage'] = p_dict
def requirestrongkey():
p = session.run_ps('Get-ItemProperty -Path HKLM:SYSTEM\CurrentControlSet\Services\\Netlogon\Parameters -Name "RequireStrongKey" | Select-Object RequireStrongKey')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['requirestrongkey'] = p_dict
def autorestartsignon():
p = session.run_ps('Get-ItemProperty -Path HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name "DisableAutomaticRestartSignOn" | Select-Object DisableAutomaticRestartSignOn')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['autorestartsignon'] = p_dict
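# Winlogon values follow: CachedLogonsCount, PasswordExpiryWarning and
# ScRemoveOption correspond to the "Interactive logon:" cached logons, password
# expiry prompt and smart card removal policies evaluated later in the scan.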
def cachedlogons():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon" -Name "CachedLogonsCount" | Select-Object CachedLogonsCount')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['cachedlogons'] = p_dict
def passexpirywarn():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon" -Name "PasswordExpiryWarning" | Select-Object PasswordExpiryWarning')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['passexpirywarn'] = p_dict
def scremove():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon" -Name "ScRemoveOption" | Select-Object ScRemoveOption')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['scremove'] = p_dict
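# Session Manager kernel values: DisableExceptionChainValidation controls
# Structured Exception Handling Overwrite Protection (SEHOP), and
# ObCaseInsensitive enforces case insensitivity for non-Windows subsystems.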
def disableexceptionchainvalid():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SYSTEM\CurrentControlSet\Control\Session Manager\kernel" -Name "DisableExceptionChainValidation" | Select-Object DisableExceptionChainValidation')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disableexceptionchainvalid'] = p_dict
def ObCaseInsensitive():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SYSTEM\CurrentControlSet\Control\Session Manager\kernel" -Name "ObCaseInsensitive" | Select-Object ObCaseInsensitive')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ObCaseInsensitive'] = p_dict
def forceunlocklog():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon" -Name "ForceUnlockLogon" | Select-Object ForceUnlockLogon')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['forceunlocklog'] = p_dict
def restrictanonsam():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SYSTEM\CurrentControlSet\Control\Lsa" -Name "RestrictAnonymousSAM" | Select-Object RestrictAnonymousSAM')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['restrictanonsam'] = p_dict
def shutdownnologon():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" -Name "ShutdownWithoutLogon" | Select-Object ShutdownWithoutLogon')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['shutdownnologon'] = p_dict
def ProtectionMode():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SYSTEM\CurrentControlSet\Control\Session Manager" -Name "ProtectionMode" | Select-Object ProtectionMode')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ProtectionMode'] = p_dict
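# Every query function above follows the same pattern: run Get-ItemProperty over
# WinRM, flatten PowerShell's small table output, and store the name/value pair
# as a ConfigParser section.  As an illustration, a query selecting EnableLUA
# returns roughly:
#
#     EnableLUA
#     ---------
#             1
#
# Stripping the b'...' wrapper, \r, \n, quotes and spaces leaves
# "EnableLUA---------1"; replacing the dashed underline with a single "-" and
# splitting on it gives ["EnableLUA", "1"], and dict([p_list]) turns that pair
# into {"EnableLUA": "1"}.  (The dash count differs per function because the
# underline PowerShell prints matches the length of the property name.)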
complexity()
maxpassage()
minpassage()
minplength()
phistorycount()
reverseencrypt()
lockoutduration()
lockoutobserve()
lockoutcount()
limitpass()
crashonaudit()
disablecad()
nousername()
legaltext()
legalcaption()
securitysig()
enablesecuritysig()
enableplainpass()
serverautodisconnect()
serversecuritysig()
serverenablesecuritysig()
serverenableforcelogoff()
screensaveractive()
screensaversecure()
screensavertimeout()
anonymousno()
disabledcreds()
includeanon()
restrictnull()
forceguest()
nolmhash()
ldapintergrity()
behavioradmin()
behavioruser()
installdetect()
enablesecureUIA()
enablelua()
promptsecure()
enablevirtual()
combrowser()
mapsbroker()
lfsvc()
shareaccess()
lltdsvc()
msis()
sshd()
wercplsupport()
RasAuto()
SessionEnv()
TermService()
UmRdpService()
RpcLocator()
RemoteRegistry()
RemoteAccess()
LanmanServer()
sealsecure()
signsecure()
disablepasschange()
machinemaxpasswrdage()
requirestrongkey()
autorestartsignon()
cachedlogons()
passexpirywarn()
scremove()
disableexceptionchainvalid()
ObCaseInsensitive()
forceunlocklog()
restrictanonsam()
shutdownnologon()
ProtectionMode()
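# All collected sections are written to a timestamped .ini file and immediately
# read back, so the compliance checks below run against the parsed ConfigParser
# data.  Option lookups are case-insensitive because ConfigParser lowercases
# option names through optionxform, which is why mixed-case keys such as
# 'ComplexityEnabled' still resolve.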
with open(timestr,'w') as configfile:
config.write(configfile)
config.read(timestr)
count = 0
count2 = 0
print("\n")
print("==============================================================")
print("\n")
print("Windows Controls \n")
if(config['complexity']['ComplexityEnabled'] == "True"):
Stat1 = "No need to change Control: ComplexityEnabled \n"
count = count + 1
else:
Stat1 = "Setting 'ComplexityEnabled' requires change: False to True \n"
count2 = count2 + 1
if(config['maxpage']['MaxPasswordAge'] == "42.00:00:00"):
Stat2 = "No need to change Control: Maximum Password Age \n"
count = count + 1
else:
Stat2 = "Setting 'MaxPasswordAge' requires change: Set value to equal to or more than 42.00 \n"
count2 = count2 + 1
if(config['minpage']['MinPasswordAge'] == "1.00:00:00"):
Stat3 = "No need to change Control: Minimum Password Age \n"
count = count + 1
else:
Stat3 = "Setting 'MinPasswordAge' requires change: Set value to equal to or more than 1.00 \n"
count2 = count2 + 1
if(int(config['minplength']['MinPasswordLength']) >= 14):
Stat4 = "No need to change Control: MinPasswordLength \n"
count = count + 1
else:
Stat4 = "Setting 'MinPasswordLength' requires change: Set value to equal to or more than 14 \n"
count2 = count2 + 1
if(int(config['phistorycount']['PasswordHistoryCount']) >= 24):
Stat5 = "No need to change Control: PasswordHistoryCount \n"
count = count + 1
else:
Stat5 = "Setting 'PasswordHistoryCount' requires change: Set value to equal to or more than 24 \n"
count2 = count2 + 1
if(config['reverseencrypt']['ReversibleEncryptionEnabled'] == "False"):
Stat6 = "No need to change Control: ReversibleEncryptionEnabled \n"
count = count + 1
else:
Stat6 = "Setting 'ReversibleEncryptionEnabled' requires change: True to False \n"
count2 = count2 + 1
if(config['lockouttime']['LockoutDuration'] == "00:30:00"):
Stat7 = "No need to change Control: LockoutDuration \n"
count = count + 1
else:
Stat7 = "Setting 'LockoutDuration' requires change: Set value to 15 or more minutes \n"
count2 = count2 + 1
if(config['lockoutobservetime']['LockoutObservationWindow'] == "00:30:00"):
Stat8 = "No need to change Control: LockoutObservationWindow \n"
count = count + 1
else:
Stat8 = "Setting 'LockoutObservationWindow' requires change: Set value to 15 or more minutes \n"
count2 = count2 + 1
if(int(config['lockoutthreshold']['LockoutThreshold']) <= 10 and int(config['lockoutthreshold']['LockoutThreshold']) != 0 ):
Stat9 = "No need to change Control: LockoutThreshold \n"
count = count + 1
else:
Stat9 = "Setting 'LockoutThreshold' requires change: Set value to 10 or fewer invalid logon attempts but not 0 \n"
count2 = count2 + 1
if(int(config['limitpass']['limitblankpassworduse']) == 1):
Stat10 = "No need to change Control: LimitBlankPasswordUse \n"
count = count + 1
else:
Stat10 = "Setting 'LimitBlankPasswordUse' requires change: Set value to 1 OR Enable in Accounts: Limit local account use of blank passwords to console logon only in GPO \n"
count2 = count2 + 1
if(int(config['crashonaudit']['crashonauditfail']) == 0):
Stat11 = "No need to change Control: CrashOnAuditFail \n"
count = count + 1
else:
Stat11 = "Setting 'CrashOnAuditFail' requires change: Set value to 0 OR Ensure in Audit: Shut down system immediately if unable to log security audits' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['disablecad']['disablecad']) == 0):
Stat12 = "No need to change Control: DisableCAD \n"
count = count + 1
else:
Stat12 = "Setting 'DisableCAD' requires change: Set value to 0 OR Ensure in 'Interactive logon: Do not require CTRL+ALT+DEL' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['nousername']['dontdisplaylastusername']) == 1):
Stat13 = "No need to change Control: DontDisplayLastUserName \n"
count = count + 1
else:
Stat13 = "Setting 'DontDisplayLastUserName' requires change: Set value to 1 OR Ensure in 'Interactive logon: Don't display last signed-in' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
if(config['legaltext']['legalnoticetext'] != "" ):
Stat14 = "No need to change Control: LegalNoticeText \n"
count = count + 1
else:
Stat14 = "Setting 'LegalNoticeText' requires change: Configure 'Interactive logon: Message text for users attempting to log on' in GPO \n"
count2 = count2 + 1
if(config['legalcaption']['legalnoticecaption'] != "" ):
Stat15 = "No need to change Control: LegalNoticeCaption \n"
count = count + 1
else:
Stat15 = "Setting 'LegalNoticeCaption' requires change: Configure 'Interactive logon: Message title for users attempting to log on' in GPO \n"
count2 = count2 + 1
if(int(config['securitysig']['requiresecuritysignature']) == 1 ):
Stat16 = "No need to change Control: RequireSecuritySignature \n"
count = count + 1
else:
Stat16 = "Setting 'RequireSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network client: Digitally sign communications (always)' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
if(int(config['enablesecuritysig']['enablesecuritysignature']) == 1 ):
Stat17 = "No need to change Control: EnableSecuritySignature \n"
count = count + 1
else:
Stat17 = "Setting 'EnableSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network client: Digitally sign communications (if server agrees)' is set to 'Enabled' in GPO \n"
count2 = count2 + 1
if(int(config['enablesplainpass']['enableplaintextpassword']) == 0 ):
Stat18 = "No need to change Control: EnablePlainTextPassword \n"
count = count + 1
else:
Stat18 = "Setting 'EnablePlainTextPassword' requires change: Set value to 0 OR Ensure 'Microsoft network client: Send unencrypted password to third-party SMB servers' is set to 'Disabled' in GPO \n"
count2 = count2 + 1
if(int(config['serverautodisconnect']['autodisconnect']) <= 15 ):
Stat19 = "No need to change Control: Server AutoDisconnect \n"
count = count + 1
else:
Stat19 = "Setting 'Server AutoDisconnect' requires change: Set value to fewer or lesser than 15 OR Ensure 'Microsoft network server: Amount of idle time required before suspending session' is set to '15 or fewer minute(s)' in GPO \n"
count2 = count2 + 1
if(int(config['serversecuritysig']['requiresecuritysignature']) == 1 ):
Stat20 = "No need to change Control: Server RequireSecuritySignature \n"
count = count + 1
else:
Stat20 = "Setting 'Server RequireSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network server: Digitally sign communications (always)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['serverenablesecuritysig']['enablesecuritysignature']) == 1 ):
Stat21 = "No need to change Control: Server EnableSecuritySignature \n"
count = count + 1
else:
Stat21 = "Setting 'Server EnableSecuritySignature' requires change: Set value to 1 OR Ensure 'Microsoft network server: Digitally sign communications (if client agrees)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['serverenableforcelogoff']['enableforcedlogoff']) == 1 ):
Stat22 = "No need to change Control: Server enableforcedlogoff \n"
count = count + 1
else:
Stat22 = "Setting 'Server enableforcedlogoff' requires change: Set value to 1 OR Ensure 'Microsoft network server: Disconnect clients when logon hours expire' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(config['screensaveractive']['screensaveactive'] != "" ):
Stat23 = "No need to change Control: screensaveactive \n"
count = count + 1
else:
Stat23 = "Setting 'screensaveactive' requires change: Ensure 'Enable screen saver' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(config['screensaversecure']['screensaverissecure'] != "" ):
Stat24 = "No need to change Control: screensaverissecure \n"
count = count + 1
else:
Stat24 = "Setting 'screensaverissecure' requires change: Ensure 'Password protect the screen saver' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(config['screensavertimeout']['screensavertimeout'] != "" ):
Stat25 = "No need to change Control: screensavertimeout \n"
count = count + 1
else:
Stat25 = "Setting 'screensavertimeout' requires change: Ensure 'Screen saver timeout' is set to 'Enabled: 900 seconds or fewer, but not 0' in GPO \n \n"
count2 = count2 + 1
if(int(config['anonymousno']['restrictanonymous']) == 1 ):
Stat26 = "No need to change Control: RestrictAnonymous \n"
count = count + 1
else:
Stat26 = "Setting 'RestrictAnonymous' requires change: Ensure 'Network access: Do not allow anonymous enumeration of SAM accounts and shares' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['disabledcreds']['disabledomaincreds']) == 1 ):
Stat27 = "No need to change Control: DisableDomainCreds \n"
count = count + 1
else:
Stat27 = "Setting 'DisableDomainCreds' requires change: Ensure 'Network access: Do not allow storage of passwords and credentials for network authentication' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['includeanon']['everyoneincludesanonymous']) == 0 ):
Stat28 = "No need to change Control: EveryoneIncludesAnonymous \n"
count = count + 1
else:
Stat28 = "Setting 'EveryoneIncludesAnonymous' requires change: Ensure 'Network access: Let Everyone permissions apply to anonymous users' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['restrictnull']['restrictnullsessaccess']) == 1 ):
Stat29 = "No need to change Control: RestrictNullSessAccess \n"
count = count + 1
else:
Stat29 = "Setting 'RestrictNullSessAccess' requires change: Ensure 'Network access: Restrict anonymous access to Named Pipes and Shares' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['forceguest']['forceguest']) == 0 ):
Stat30 = "No need to change Control: ForceGuest \n"
count = count + 1
else:
Stat30 = "Setting 'ForceGuest' requires change: Ensure 'Network access: Sharing and security model for local accounts' is set to 'Classic - local users authenticate as themselves' in GPO \n \n"
count2 = count2 + 1
if(int(config['nolmhash']['nolmhash']) == 1 ):
Stat31 = "No need to change Control: NoLMHash \n"
count = count + 1
else:
Stat31 = "Setting 'NoLMHash' requires change: Ensure 'Network security: Do not store LAN Manager hash value on next password change' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['ldapintergrity']['ldapclientintegrity']) >= 1 ):
Stat32 = "No need to change Control: LDAPClientIntegrity \n"
count = count + 1
else:
Stat32 = "Setting 'LDAPClientIntegrity' requires change: Ensure 'Network security: LDAP client signing requirements' is set to 'Negotiate signing' or higher in GPO \n \n"
count2 = count2 + 1
if(int(config['behavioradmin']['consentpromptbehavioradmin']) >= 1 ):
Stat33 = "No need to change Control: ConsentPromptBehaviorAdmin \n"
count = count + 1
else:
Stat33 = "Setting 'ConsentPromptBehaviorAdmin' requires change: Ensure 'User Account Control: Behavior of the elevation for consent on the secure desktop' in GPO \n \n"
count2 = count2 + 1
if(int(config['behavioruser']['consentpromptbehavioruser']) >= 1 ):
Stat34 = "No need to change Control: ConsentPromptBehaviorUser \n"
count = count + 1
else:
Stat34 = "Setting 'ConsentPromptBehaviorUser' requires change: Ensure 'User Account Control: Behavior of the elevation prompt for standard users' is set to 'Automatically deny elevation requests' in GPO \n \n"
count2 = count2 + 1
if(int(config['installdetect']['enableinstallerdetection']) >= 1 ):
Stat35 = "No need to change Control: EnableInstallerDetection \n"
count = count + 1
else:
Stat35 = "Setting 'EnableInstallerDetection' requires change: Ensure 'User Account Control: Detect application installations and prompt for elevation' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['enablesecureUIA']['enablesecureuiapaths']) >= 1 ):
Stat36 = "No need to change Control: EnableSecureUIAPaths \n"
count = count + 1
else:
Stat36 = "Setting 'EnableSecureUIAPaths' requires change: Ensure 'User Account Control: Only elevate UIAccess applications that are installed in secure locations' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['enablelua']['enablelua']) >= 1 ):
Stat37 = "No need to change Control: EnableLUA \n"
count = count + 1
else:
Stat37 = "Setting 'EnableLUA' requires change: Ensure 'User Account Control: Run all administrators in Admin Approval Mode' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['promptsecure']['promptonsecuredesktop']) >= 1 ):
Stat38 = "No need to change Control: PromptOnSecureDesktop \n"
count = count + 1
else:
Stat38 = "Setting 'PromptOnSecureDesktop' requires change: Ensure 'User Account Control: Switch to the secure desktop when prompting for elevation' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['enablevirtual']['enablevirtualization']) >= 1 ):
Stat39 = "No need to change Control: EnableVirtualization \n"
count = count + 1
else:
Stat39 = "Setting 'EnableVirtualization' requires change: Ensure 'User Account Control: Virtualize file and registry write failures to per-user locations' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['combrowser']['start']) == 2 or int(config['combrowser']['start']) == 4 ):
Stat40 = "No need to change Control: Browser \n"
count = count + 1
else:
Stat40 = "Setting 'Browser' requires change: Ensure 'Computer Browser (Browser)' is set to 'Disabled' or 'Not Installed' in GPO \n \n"
count2 = count2 + 1
if(int(config['mapsbroker']['start']) == 2):
Stat41 = "No need to change Control: MapsBroker \n"
count = count + 1
else:
Stat41 = "Setting 'MapsBroker' requires change: Ensure 'Downloaded Maps Manager (MapsBroker)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['lfsvc']['start']) == 3):
Stat42 = "No need to change Control: lfsvc \n"
count = count + 1
else:
Stat42 = "Setting 'lfsvc' requires change: Ensure 'Geolocation Service (lfsvc)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['shareaccess']['start']) == 2):
Stat43 = "No need to change Control: SharedAccess \n"
count = count + 1
else:
Stat43 = "Setting 'SharedAccess' requires change: Ensure 'Internet Connection Sharing (ICS) (SharedAccess)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['lltdsvc']['start']) == 3):
Stat44 = "No need to change Control: lltdsvc \n"
count = count + 1
else:
Stat44 = "Setting 'lltdsvc' requires change: Ensure 'Link-Layer Topology Discovery Mapper (lltdsvc)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['msis']['start']) == 3):
Stat45 = "No need to change Control: MSiSCSI \n"
count = count + 1
else:
Stat45 = "Setting 'MSiSCSI' requires change: Ensure 'Microsoft iSCSI Initiator Service (MSiSCSI)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['sshd']['start']) == 0 or int(config['sshd']['start']) == 4):
Stat46 = "No need to change Control: sshd \n"
count = count + 1
else:
Stat46 = "Setting 'sshd' requires change: Ensure 'OpenSSH SSH Server (sshd)' is set to 'Disabled' or 'Not Installed' in GPO \n \n"
count2 = count2 + 1
if(int(config['wercplsupport']['start']) == 0 ):
Stat47 = "No need to change Control: wercplsupport \n"
count = count + 1
else:
Stat47 = "Setting 'wercplsupport' requires change: Ensure 'Problem Reports and Solutions Control Panel Support (wercplsupport)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['RasAuto']['start']) == 0 ):
Stat48 = "No need to change Control: RasAuto \n"
count = count + 1
else:
Stat48 = "Setting 'RasAuto' requires change: Ensure 'Remote Access Auto Connection Manager (RasAuto)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['SessionEnv']['start']) == 3 ):
Stat49 = "No need to change Control: SessionEnv \n"
count = count + 1
else:
Stat49 = "Setting 'SessionEnv' requires change: Ensure 'Remote Desktop Configuration (SessionEnv)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['TermService']['start']) == 3 ):
Stat50 = "No need to change Control: TermService \n"
count = count + 1
else:
Stat50 = "Setting 'TermService' requires change: Ensure 'Remote Desktop Services (TermService)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['UmRdpService']['start']) == 3 ):
Stat51 = "No need to change Control: UmRdpService \n"
count = count + 1
else:
Stat51 = "Setting 'UmRdpService' requires change:Ensure 'Remote Desktop Services UserMode Port Redirector (UmRdpService)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['RpcLocator']['start']) == 3 ):
Stat52 = "No need to change Control: RpcLocator \n"
count = count + 1
else:
Stat52 = "Setting 'RpcLocator' requires change: Ensure 'Remote Procedure Call (RPC) Locator (RpcLocator)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['RemoteRegistry']['start']) == 2 ):
Stat53 = "No need to change Control: RemoteRegistry \n"
count = count + 1
else:
Stat53 = "Setting 'RemoteRegistry' requires change: Ensure 'Remote Registry (RemoteRegistry)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['RemoteAccess']['start']) == 4 ):
Stat54 = "No need to change Control: RemoteAccess \n"
count = count + 1
else:
Stat54 = "Setting 'RemoteAccess' requires change: Ensure 'Routing and Remote Access (RemoteAccess)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['LanmanServer']['start']) == 4 ):
Stat55 = "No need to change Control: LanmanServer \n"
count = count + 1
else:
Stat55 = "Setting 'LanmanServer' requires change: Ensure 'Server (LanmanServer)' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['sealsecure']['sealsecurechannel']) == 1 ):
Stat56 = "No need to change Control: SealSecureChannel \n"
count = count + 1
else:
Stat56 = "Setting 'SealSecureChannel' requires change: Ensure 'Domain member: Digitally encrypt secure channel data (when possible)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['signsecure']['signsecurechannel']) == 1 ):
Stat57 = "No need to change Control: SignSecureChannel \n"
count = count + 1
else:
Stat57 = "Setting 'SignSecureChannel' requires change: Ensure 'Domain member: Digitally sign secure channel data (when possible)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['disablepasschange']['disablepasswordchange']) == 1 ):
Stat58 = "No need to change Control: DisablePasswordChange \n"
count = count + 1
else:
Stat58 = "Setting 'DisablePasswordChange' requires change: Ensure 'Domain member: Disable machine account password changes' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['machinemaxpasswrdage']['maximumpasswordage']) <= 30 and int(config['machinemaxpasswrdage']['maximumpasswordage']) != 0 ):
Stat59 = "No need to change Control: MaximumPasswordAge \n"
count = count + 1
else:
Stat59 = "Setting 'MaximumPasswordAge' requires change: Ensure 'Domain member: Maximum machine account password age' is set to '30 or fewer days, but not 0' in GPO \n \n"
count2 = count2 + 1
if(int(config['requirestrongkey']['requirestrongkey']) == 1):
Stat60 = "No need to change Control: RequireStrongKey \n"
count = count + 1
else:
Stat60 = "Setting 'RequireStrongKey' requires change: Ensure 'Domain member: Require strong (Windows 2000 or later) session key' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['autorestartsignon']['disableautomaticrestartsignon']) == 0):
Stat61 = "No need to change Control: DisableAutomaticRestartSignOn \n"
count = count + 1
else:
Stat61 = "Setting 'DisableAutomaticRestartSignOn' requires change: Ensure 'Sign-in and lock last interactive user automatically after a restart' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['cachedlogons']['cachedlogonscount']) <= 4):
Stat62 = "No need to change Control: CachedLogonsCount \n"
count = count + 1
else:
Stat62 = "Setting 'CachedLogonsCount' requires change: Ensure 'Interactive logon: Number of previous logons to cache (in case domain controller is not available)' is set to '4 or fewer logon(s)' in GPO \n \n"
count2 = count2 + 1
if(int(config['passexpirywarn']['passwordexpirywarning']) >= 5 and int(config['passexpirywarn']['passwordexpirywarning']) <= 14):
Stat63 = "No need to change Control: PasswordExpiryWarning \n"
count = count + 1
else:
Stat63 = "Setting 'PasswordExpiryWarning' requires change: 'Interactive logon: Prompt user to change password before expiration' is set to 'between 5 and 14 days' in GPO \n \n"
count2 = count2 + 1
if(int(config['scremove']['scremoveoption']) >= 1):
Stat64 = "No need to change Control: ScRemoveOption \n"
count = count + 1
else:
Stat64 = "Setting 'ScRemoveOption' requires change: Ensure 'Interactive logon: Smart card removal behavior' is set to 'Lock Workstation' or higher in GPO \n \n"
count2 = count2 + 1
if(int(config['disableexceptionchainvalid']['disableexceptionchainvalidation']) == 1):
Stat65 = "No need to change Control: DisableExceptionChainValidation \n"
count = count + 1
else:
Stat65 = "Setting 'DisableExceptionChainValidation' requires change: Ensure 'Enable Structured Exception Handling Overwrite Protection (SEHOP)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['ObCaseInsensitive']['obcaseinsensitive']) == 1):
Stat66 = "No need to change Control: ObCaseInsensitive \n"
count = count + 1
else:
Stat66 = "Setting 'ObCaseInsensitive' requires change: Ensure 'System objects: Require case insensitivity for non-Windows subsystems' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['forceunlocklog']['forceunlocklogon']) == 1):
Stat67 = "No need to change Control: ForceUnlockLogon \n"
count = count + 1
else:
Stat67 = "Setting 'ForceUnlockLogon' requires change: Ensure 'Interactive logon: Require Domain Controller Authentication to unlock workstation' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['restrictanonsam']['restrictanonymoussam']) == 1):
Stat68 = "No need to change Control: RestrictAnonymousSAM \n"
count = count + 1
else:
Stat68 = "Setting 'RestrictAnonymousSAM' requires change: Ensure 'Network access: Do not allow anonymous enumeration of SAM accounts' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['shutdownnologon']['shutdownwithoutlogon']) == 0):
Stat69 = "No need to change Control: ShutdownWithoutLogon \n"
count = count + 1
else:
Stat69 = "Setting 'ShutdownWithoutLogon' requires change: Ensure 'Shutdown: Allow system to be shut down without having to log on' is set to 'Disabled' in GPO \n \n"
count2 = count2 + 1
if(int(config['ProtectionMode']['protectionmode']) == 1):
Stat70 = "No need to change Control: ProtectionMode \n"
count = count + 1
else:
Stat70 = "Setting 'ProtectionMode' requires change: Ensure 'System objects: Strengthen default permissions of internal system objects (e.g. Symbolic Links)' is set to 'Enabled' in GPO \n \n"
count2 = count2 + 1
#print(config.sections())
print("\n")
print("============================================================== \n")
listbox.insert(0, "Writing to " + timestr + " in program folder.")
listbox.insert(1, " ")
listbox.insert(2, "Account Security + Remediations")
listbox.insert(3, " ")
listbox.insert(4, Stat1)
listbox.insert(4, Stat2)
listbox.insert(4, Stat3)
listbox.insert(4, Stat4)
listbox.insert(4, Stat5)
listbox.insert(4, Stat6)
listbox.insert(4, Stat7)
listbox.insert(4, Stat8)
listbox.insert(4, Stat9)
listbox.insert(4, Stat10)
listbox.insert(4, Stat11)
listbox.insert(4, Stat12)
listbox.insert(4, Stat13)
listbox.insert(4, Stat14)
listbox.insert(4, Stat15)
listbox.insert(4, Stat16)
listbox.insert(4, Stat17)
listbox.insert(4, Stat18)
listbox.insert(4, Stat19)
listbox.insert(4, Stat20)
listbox.insert(4, Stat21)
listbox.insert(4, Stat22)
listbox.insert(4, Stat23)
listbox.insert(4, Stat24)
listbox.insert(4, Stat25)
listbox.insert(4, Stat26)
listbox.insert(4, Stat27)
listbox.insert(4, Stat28)
listbox.insert(4, Stat29)
listbox.insert(4, Stat30)
listbox.insert(4, Stat31)
listbox.insert(4, Stat32)
listbox.insert(4, Stat33)
listbox.insert(4, Stat34)
listbox.insert(4, Stat35)
listbox.insert(4, Stat36)
listbox.insert(4, Stat37)
listbox.insert(4, Stat38)
listbox.insert(4, Stat39)
listbox.insert(4, Stat40)
listbox.insert(4, Stat41)
listbox.insert(4, Stat42)
listbox.insert(4, Stat43)
listbox.insert(4, Stat44)
listbox.insert(4, Stat45)
listbox.insert(4, Stat46)
listbox.insert(4, Stat47)
listbox.insert(4, Stat48)
listbox.insert(4, Stat49)
listbox.insert(4, Stat50)
listbox.insert(4, Stat51)
listbox.insert(4, Stat52)
listbox.insert(4, Stat53)
listbox.insert(4, Stat54)
listbox.insert(4, Stat55)
listbox.insert(4, Stat56)
listbox.insert(4, Stat57)
listbox.insert(4, Stat58)
listbox.insert(4, Stat59)
listbox.insert(4, Stat60)
listbox.insert(4, Stat61)
listbox.insert(4, Stat62)
listbox.insert(4, Stat63)
listbox.insert(4, Stat64)
listbox.insert(4, Stat65)
listbox.insert(4, Stat66)
listbox.insert(4, Stat67)
listbox.insert(4, Stat68)
listbox.insert(4, Stat69)
listbox.insert(4, Stat70)
listbox2.insert(0, "\nNumber of Compliant controls")
listbox2.insert(1, "--> " + str(count))
listbox2.insert(2, "Number of Non-Compliant controls")
listbox2.insert(3, "--> " + str(count2))
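# saveScan/saveScan2 dump the current listbox contents to timestamped text
# reports, and deleteScan/deleteScan2 clear the result panes between scans.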
def saveScan():
date = time.strftime("%Y_%m_%d-%I_%M_%S_%p")
datestr = date + " Windows Report.txt"
with open(datestr, 'w') as f:
f.write("--Configuration Scan--\n\n")
f.write('\n'.join(listbox.get('0', 'end')))
f.write('\n'.join(listbox2.get('0', 'end')))
f.close()
def saveScan2():
date = time.strftime("%Y_%m_%d-%I_%M_%S_%p")
datestr = date + " Browser Report.txt"
with open(datestr, 'w') as f:
f.write("--Configuration Scan--\n\n")
f.write('\n'.join(listbox3.get('0', 'end')))
f.write('\n'.join(listbox4.get('0', 'end')))
f.close()
def deleteScan():
listbox.delete('0', 'end')
listbox2.delete('0', 'end')
def deleteScan2():
listbox3.delete('0', 'end')
listbox4.delete('0', 'end')
def googlescan():
host2 = IP2.get()
domain2 = Domain2.get()
user2 = username2.get()
password2 = passwd2.get()
session = winrm.Session(host2, auth=('{}@{}' .format(user2 ,domain2), password2), transport='ntlm')
import time
import configparser
config = configparser.ConfigParser()
time = time.strftime("%Y_%m_%d-%I_%M_%S_%p")
timestr = time + " Browser Settings.ini"
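# googlescan audits Google Chrome policy values under
# HKLM:SOFTWARE\Policies\Google\Chrome on the remote host, reusing the same
# WinRM query-and-parse pattern, and records them in a timestamped
# "Browser Settings.ini" before the browser compliance checks run.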
def remotehostcurtain():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RemoteAccessHostRequireCurtain" | Select-Object RemoteAccessHostRequireCurtain')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['remotehostcurtain'] = p_dict
def remotehostuiremoteassist():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RemoteAccessHostAllowUiAccessForRemoteAssistance" | Select-Object RemoteAccessHostAllowUiAccessForRemoteAssistance')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['remotehostuiremoteassist'] = p_dict
def BackgroundModeEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "BackgroundModeEnabled" | Select-Object BackgroundModeEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['BackgroundModeEnabled'] = p_dict
def PromptForDownloadLocation():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "PromptForDownloadLocation" | Select-Object PromptForDownloadLocation')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['PromptForDownloadLocation'] = p_dict
def savebrowserhistory():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SavingBrowserHistoryDisabled" | Select-Object SavingBrowserHistoryDisabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['savebrowserhistory'] = p_dict
def ComponentUpdatesEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "ComponentUpdatesEnabled" | Select-Object ComponentUpdatesEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ComponentUpdatesEnabled'] = p_dict
def ThirdPartyBlockingEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "ThirdPartyBlockingEnabled" | Select-Object ThirdPartyBlockingEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ThirdPartyBlockingEnabled'] = p_dict
def SuppressUnsupportedOSWarning():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SuppressUnsupportedOSWarning" | Select-Object SuppressUnsupportedOSWarning')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SuppressUnsupportedOSWarning'] = p_dict
def EnableOnlineRevocationChecks():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "EnableOnlineRevocationChecks" | Select-Object EnableOnlineRevocationChecks')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['EnableOnlineRevocationChecks'] = p_dict
def SafeSitesFilterBehavior():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SafeSitesFilterBehavior" | Select-Object SafeSitesFilterBehavior')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SafeSitesFilterBehavior'] = p_dict
def DefaultNotificationsSetting():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DefaultNotificationsSetting" | Select-Object DefaultNotificationsSetting')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['DefaultNotificationsSetting'] = p_dict
def Defaultbluetooth():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DefaultWebBluetoothGuardSetting" | Select-Object DefaultWebBluetoothGuardSetting')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['Defaultbluetooth'] = p_dict
def DefaultWebUsbGuardSetting():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DefaultWebUsbGuardSetting" | Select-Object DefaultWebUsbGuardSetting')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['DefaultWebUsbGuardSetting'] = p_dict
def PasswordManagerEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "PasswordManagerEnabled" | Select-Object PasswordManagerEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['PasswordManagerEnabled'] = p_dict
def AuthSchemes():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "AuthSchemes" | Select-Object AuthSchemes')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['AuthSchemes'] = p_dict
def CloudPrintProxyEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "CloudPrintProxyEnabled" | Select-Object CloudPrintProxyEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['CloudPrintProxyEnabled'] = p_dict
def SitePerProcess():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SitePerProcess" | Select-Object SitePerProcess')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SitePerProcess'] = p_dict
def DownloadRestrictions():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DownloadRestrictions" | Select-Object DownloadRestrictions')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['DownloadRestrictions'] = p_dict
def disablesafebrowsing():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DisableSafeBrowsingProceedAnyway" | Select-Object DisableSafeBrowsingProceedAnyway')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['disablesafebrowsing'] = p_dict
def RelaunchNotification():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RelaunchNotification" | Select-Object RelaunchNotification')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RelaunchNotification'] = p_dict
def RelaunchNotificationPeriod():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RelaunchNotificationPeriod" | Select-Object RelaunchNotificationPeriod')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['RelaunchNotificationPeriod'] = p_dict
def revocationchecklocalanchor():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RequireOnlineRevocationChecksForLocalAnchors" | Select-Object RequireOnlineRevocationChecksForLocalAnchors')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['revocationchecklocalanchor'] = p_dict
def ChromeCleanupEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "ChromeCleanupEnabled" | Select-Object ChromeCleanupEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ChromeCleanupEnabled'] = p_dict
def BuiltInDnsClientEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "BuiltInDnsClientEnabled" | Select-Object BuiltInDnsClientEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['BuiltInDnsClientEnabled'] = p_dict
def DefaultCookiesSetting():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DefaultCookiesSetting" | Select-Object DefaultCookiesSetting')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['DefaultCookiesSetting'] = p_dict
def DefaultGeolocationSetting():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "DefaultGeolocationSetting" | Select-Object DefaultGeolocationSetting')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['DefaultGeolocationSetting'] = p_dict
def EnableMediaRouter():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "EnableMediaRouter" | Select-Object EnableMediaRouter')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['EnableMediaRouter'] = p_dict
def BlockThirdPartyCookies():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "BlockThirdPartyCookies" | Select-Object BlockThirdPartyCookies')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['BlockThirdPartyCookies'] = p_dict
def MetricsReportingEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "MetricsReportingEnabled" | Select-Object MetricsReportingEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['MetricsReportingEnabled'] = p_dict
def chromecleanupreport():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "ChromeCleanupReportingEnabled" | Select-Object ChromeCleanupReportingEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['chromecleanupreport'] = p_dict
def BrowserSignin():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "BrowserSignin" | Select-Object BrowserSignin')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['BrowserSignin'] = p_dict
def TranslateEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "TranslateEnabled" | Select-Object TranslateEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['TranslateEnabled'] = p_dict
def NetworkPredictionOptions():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "NetworkPredictionOptions" | Select-Object NetworkPredictionOptions')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['NetworkPredictionOptions'] = p_dict
def SearchSuggestEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SearchSuggestEnabled" | Select-Object SearchSuggestEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SearchSuggestEnabled'] = p_dict
def SpellCheckServiceEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SpellCheckServiceEnabled" | Select-Object SpellCheckServiceEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SpellCheckServiceEnabled'] = p_dict
def AlternateErrorPagesEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "AlternateErrorPagesEnabled" | Select-Object AlternateErrorPagesEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['AlternateErrorPagesEnabled'] = p_dict
def SyncDisabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SyncDisabled" | Select-Object SyncDisabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['SyncDisabled'] = p_dict
def safebrowsingtrustedsource():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "SafeBrowsingForTrustedSourcesEnabled" | Select-Object SafeBrowsingForTrustedSourcesEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("------------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['safebrowsingtrustedsource'] = p_dict
def urlkeyeddatacollect():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "UrlKeyedAnonymizedDataCollectionEnabled" | Select-Object UrlKeyedAnonymizedDataCollectionEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['urlkeyeddatacollect'] = p_dict
def allowdeletebrowserhistory():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "AllowDeletingBrowserHistory" | Select-Object AllowDeletingBrowserHistory')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['allowdeletebrowserhistory'] = p_dict
def remoteaccessfirewalltraverse():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RemoteAccessHostFirewallTraversal" | Select-Object RemoteAccessHostFirewallTraversal')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("---------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['remoteaccessfirewalltraverse'] = p_dict
def remoteaccessclientpair():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RemoteAccessHostAllowClientPairing" | Select-Object RemoteAccessHostAllowClientPairing')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['remoteaccessclientpair'] = p_dict
def remoteaccessrelayconnect():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "RemoteAccessHostAllowRelayedConnection" | Select-Object RemoteAccessHostAllowRelayedConnection')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['remoteaccessrelayconnect'] = p_dict
def CloudPrintSubmitEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "CloudPrintSubmitEnabled" | Select-Object CloudPrintSubmitEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['CloudPrintSubmitEnabled'] = p_dict
def ImportSavedPasswords():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "ImportSavedPasswords" | Select-Object ImportSavedPasswords')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("--------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['ImportSavedPasswords'] = p_dict
def AutofillCreditCardEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "AutofillCreditCardEnabled" | Select-Object AutofillCreditCardEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("-------------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['AutofillCreditCardEnabled'] = p_dict
def AutofillAddressEnabled():
p = session.run_ps('Get-ItemProperty -Path "HKLM:SOFTWARE\Policies\Google\Chrome" -Name "AutofillAddressEnabled" | Select-Object AutofillAddressEnabled')
p_output = str(p.std_out)
p_output = p_output.replace("b\'", "")
p_output = p_output.replace("\\r", "")
p_output = p_output.replace("\\n", "")
p_output = p_output.replace("\'", "")
p_output = p_output.replace(" ", "")
p_output = p_output.replace("----------------------", "-")
p_list = p_output.split("-")
p_dict = dict([p_list])
config['AutofillAddressEnabled'] = p_dict
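    # Run every collection helper so the config parser holds all policy values.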
remotehostcurtain()
remotehostuiremoteassist()
BackgroundModeEnabled()
PromptForDownloadLocation()
savebrowserhistory()
ComponentUpdatesEnabled()
ThirdPartyBlockingEnabled()
SuppressUnsupportedOSWarning()
EnableOnlineRevocationChecks()
SafeSitesFilterBehavior()
DefaultNotificationsSetting()
Defaultbluetooth()
DefaultWebUsbGuardSetting()
PasswordManagerEnabled()
AuthSchemes()
CloudPrintProxyEnabled()
SitePerProcess()
DownloadRestrictions()
disablesafebrowsing()
RelaunchNotification()
RelaunchNotificationPeriod()
revocationchecklocalanchor()
ChromeCleanupEnabled()
BuiltInDnsClientEnabled()
DefaultCookiesSetting()
DefaultGeolocationSetting()
EnableMediaRouter()
BlockThirdPartyCookies()
MetricsReportingEnabled()
chromecleanupreport()
BrowserSignin()
TranslateEnabled()
NetworkPredictionOptions()
SearchSuggestEnabled()
SpellCheckServiceEnabled()
AlternateErrorPagesEnabled()
SyncDisabled()
safebrowsingtrustedsource()
urlkeyeddatacollect()
allowdeletebrowserhistory()
remoteaccessfirewalltraverse()
remoteaccessclientpair()
remoteaccessrelayconnect()
CloudPrintSubmitEnabled()
ImportSavedPasswords()
AutofillCreditCardEnabled()
AutofillAddressEnabled()
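    # Write the collected values to the timestamped report file, then read the
    # file back so the compliance evaluation below works from the saved report.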
with open(timestr,'w') as configfile:
config.write(configfile)
config.read(timestr)
count = 0
count2 = 0
print("\n")
print("==============================================================")
print("\n")
print("Browser Controls \n")
if(int(config['remotehostcurtain']['remoteaccesshostrequirecurtain']) == 0):
Stat1 = "No need to change Control: RemoteAccessHostRequireCurtain \n"
count = count + 1
else:
Stat1 = "Setting 'RemoteAccessHostRequireCurtain' requires change: 1.1.1 (L1) Ensure 'Enable curtaining of remote access hosts' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['remotehostuiremoteassist']['remoteaccesshostallowuiaccessforremoteassistance']) == 0):
Stat2 = "No need to change Control: RemoteAccessHostAllowUiAccessForRemoteAssistance \n"
count = count + 1
else:
Stat2 = "Setting 'RemoteAccessHostAllowUiAccessForRemoteAssistance' requires change: 1.1.3 (L1) Ensure 'Allow remote users to interact with elevated windows in remote assistance sessions' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['BackgroundModeEnabled']['backgroundmodeenabled']) == 0):
Stat3 = "No need to change Control: BackgroundModeEnabled \n"
count = count + 1
else:
Stat3 = "Setting 'BackgroundModeEnabled' requires change: 1.2 (L1) Ensure 'Continue running background apps when Google Chrome is closed' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['PromptForDownloadLocation']['promptfordownloadlocation']) == 1):
Stat4 = "No need to change Control: PromptForDownloadLocation \n"
count = count + 1
else:
Stat4 = "Setting 'PromptForDownloadLocation' requires change: 1.3 (L1) Ensure 'Ask where to save each file before downloading' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['savebrowserhistory']['savingbrowserhistorydisabled']) == 0):
Stat5 = "No need to change Control: SavingBrowserHistoryDisabled \n"
count = count + 1
else:
Stat5 = "Setting 'SavingBrowserHistoryDisabled' requires change: 1.4 (L1) Ensure 'Disable saving browser history' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['ComponentUpdatesEnabled']['componentupdatesenabled']) == 1):
Stat6 = "No need to change Control: ComponentUpdatesEnabled \n"
count = count + 1
else:
Stat6 = "Setting 'ComponentUpdatesEnabled' requires change: 1.6 (L1) Ensure 'Enable component updates in Google Chrome' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['ThirdPartyBlockingEnabled']['thirdpartyblockingenabled']) == 1):
Stat7 = "No need to change Control: ThirdPartyBlockingEnabled \n"
count = count + 1
else:
Stat7 = "Setting 'ThirdPartyBlockingEnabled' requires change: 1.8 (L1) Ensure 'Enable third party software injection blocking' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['SuppressUnsupportedOSWarning']['suppressunsupportedoswarning']) == 0):
Stat8 = "No need to change Control: SuppressUnsupportedOSWarning \n"
count = count + 1
else:
Stat8 = "Setting 'SuppressUnsupportedOSWarning' requires change: 1.10 (L1) Ensure 'Suppress the unsupported OS warning' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['EnableOnlineRevocationChecks']['enableonlinerevocationchecks']) == 0):
Stat9 = "No need to change Control: EnableOnlineRevocationChecks \n"
count = count + 1
else:
Stat9 = "Setting 'EnableOnlineRevocationChecks' requires change: 1.11 (L1) Ensure 'Whether online OCSP/CRL checks are performed' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['SafeSitesFilterBehavior']['safesitesfilterbehavior']) >= 1):
Stat10 = "No need to change Control: SafeSitesFilterBehavior \n"
count = count + 1
else:
Stat10 = "Setting 'SafeSitesFilterBehavior' requires change: 1.13 (L1) Ensure 'Control SafeSites adult content filtering' is set to 'Enabled' with value 'Do not filter sites for adult content' specified \n"
count2 = count2 + 1
if(int(config['DefaultNotificationsSetting']['defaultnotificationssetting']) >= 1):
Stat11 = "No need to change Control: DefaultNotificationsSetting \n"
count = count + 1
else:
Stat11 = "Setting 'DefaultNotificationsSetting' requires change: 2.2 (L2) Ensure 'Default notification setting' is set to 'Enabled' with 'Do not allow any site to show desktop notifications' \n"
count2 = count2 + 1
if(int(config['Defaultbluetooth']['defaultwebbluetoothguardsetting']) >= 1):
Stat12 = "No need to change Control: DefaultWebBluetoothGuardSetting \n"
count = count + 1
else:
Stat12 = "Setting 'DefaultWebBluetoothGuardSetting' requires change: 2.3 (L2) Ensure 'Control use of the Web Bluetooth API' is set to 'Enabled' with 'Do not allow any site to request access to Bluetooth devices via the Web Bluetooth API' \n"
count2 = count2 + 1
if(int(config['DefaultWebUsbGuardSetting']['defaultwebusbguardsetting']) >= 1):
Stat13 = "No need to change Control: DefaultWebUsbGuardSetting \n"
count = count + 1
else:
Stat13 = "Setting 'DefaultWebUsbGuardSetting' requires change: 2.4 (L2) Ensure 'Control use of the WebUSB API' is set to 'Enabled' with 'Do not allow any site to request access to USB devices via the WebUSB API' \n"
count2 = count2 + 1
if(int(config['PasswordManagerEnabled']['passwordmanagerenabled']) >= 1):
Stat14 = "No need to change Control: PasswordManagerEnabled \n"
count = count + 1
else:
Stat14 = "Setting 'PasswordManagerEnabled' requires change: 2.8 (L1) Ensure 'Enable saving passwords to the password manager' is Configured \n"
count2 = count2 + 1
if(config['AuthSchemes']['authschemes'] != ""):
Stat15 = "No need to change Control: AuthSchemes \n"
count = count + 1
else:
Stat15 = "Setting 'AuthSchemes' requires change: 2.9 (L1) Ensure 'Supported authentication schemes' is set to 'Enabled' (ntlm, negotiate) \n"
count2 = count2 + 1
if(int(config['CloudPrintProxyEnabled']['cloudprintproxyenabled']) == 0):
Stat16 = "No need to change Control: CloudPrintProxyEnabled \n"
count = count + 1
else:
Stat16 = "Setting 'CloudPrintProxyEnabled' requires change: 2.12 (L1) Ensure 'Enable Google Cloud Print Proxy' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['SitePerProcess']['siteperprocess']) == 1):
Stat17 = "No need to change Control: SitePerProcess \n"
count = count + 1
else:
Stat17 = "Setting 'SitePerProcess' requires change: 2.13 (L1) Ensure 'Enable Site Isolation for every site' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['DownloadRestrictions']['downloadrestrictions']) >= 1):
Stat18 = "No need to change Control: DownloadRestrictions \n"
count = count + 1
else:
Stat18 = "Setting 'DownloadRestrictions' requires change: 2.14 (L1) Ensure 'Allow download restrictions' is set to 'Enabled' with 'Block dangerous downloads' specified \n"
count2 = count2 + 1
if(int(config['disablesafebrowsing']['disablesafebrowsingproceedanyway']) == 1):
Stat19 = "No need to change Control: DisableSafeBrowsingProceedAnyway \n"
count = count + 1
else:
Stat19 = "Setting 'DisableSafeBrowsingProceedAnyway' requires change: 2.15 (L1) Ensure 'Disable proceeding from the Safe Browsing warning page' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['RelaunchNotification']['relaunchnotification']) >= 1):
Stat20 = "No need to change Control: RelaunchNotification \n"
count = count + 1
else:
Stat20 = "Setting 'RelaunchNotification' requires change: 2.16 (L1) Ensure 'Notify a user that a browser relaunch or device restart is recommended or required' is set to 'Enabled' with 'Show a recurring prompt to the user indication that a relaunch is required' specified \n"
count2 = count2 + 1
if(int(config['RelaunchNotificationPeriod']['relaunchnotificationperiod']) >= 86400000):
Stat21 = "No need to change Control: RelaunchNotificationPeriod \n"
count = count + 1
else:
Stat21 = "Setting 'RelaunchNotificationPeriod' requires change: 2.17 (L1) Ensure 'Set the time period for update notifications' is set to 'Enabled' with '86400000' (1 day) specified \n"
count2 = count2 + 1
if(int(config['revocationchecklocalanchor']['requireonlinerevocationchecksforlocalanchors']) == 1):
Stat22 = "No need to change Control: RequireOnlineRevocationChecksForLocalAnchors \n"
count = count + 1
else:
Stat22 = "Setting 'RequireOnlineRevocationChecksForLocalAnchors' requires change: 2.18 (L2) Ensure 'Whether online OCSP/CRL checks are required for local trust anchors' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['ChromeCleanupEnabled']['chromecleanupenabled']) >= 0):
Stat23 = "No need to change Control: ChromeCleanupEnabled \n"
count = count + 1
else:
Stat23 = "Setting 'ChromeCleanupEnabled' requires change: 2.19 (L1) Ensure 'Enable Chrome Cleanup on Windows' is Configured \n"
count2 = count2 + 1
if(int(config['BuiltInDnsClientEnabled']['builtindnsclientenabled']) == 0):
Stat24 = "No need to change Control: BuiltInDnsClientEnabled \n"
count = count + 1
else:
Stat24 = "Setting 'BuiltInDnsClientEnabled' requires change: 2.20 (L2) Ensure 'Use built-in DNS client' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['DefaultCookiesSetting']['defaultcookiessetting']) == 1):
Stat25 = "No need to change Control: DefaultCookiesSetting \n"
count = count + 1
else:
Stat25 = "Setting 'DefaultCookiesSetting' requires change: 3.1 (L2) Ensure 'Default cookies setting' is set to 'Enabled' (Keep cookies for the duration of the session) \n"
count2 = count2 + 1
if(int(config['DefaultGeolocationSetting']['defaultgeolocationsetting']) >= 1):
Stat26 = "No need to change Control: DefaultGeolocationSetting \n"
count = count + 1
else:
Stat26 = "Setting 'DefaultGeolocationSetting' requires change: 3.2 (L1) Ensure 'Default geolocation setting' is set to 'Enabled' with 'Do not allow any site to track the users' physical location' \n"
count2 = count2 + 1
if(int(config['EnableMediaRouter']['enablemediarouter']) == 0):
Stat27 = "No need to change Control: EnableMediaRouter \n"
count = count + 1
else:
Stat27 = "Setting 'EnableMediaRouter' requires change: 3.3 (L1) Ensure 'Enable Google Cast' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['BlockThirdPartyCookies']['blockthirdpartycookies']) == 1):
Stat28 = "No need to change Control: BlockThirdPartyCookies \n"
count = count + 1
else:
Stat28 = "Setting 'BlockThirdPartyCookies' requires change: 3.4 (L1) Ensure 'Block third party cookies' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['MetricsReportingEnabled']['metricsreportingenabled']) == 0):
Stat29 = "No need to change Control: MetricsReportingEnabled \n"
count = count + 1
else:
Stat29 = "Setting 'MetricsReportingEnabled' requires change: 3.5 (L1) Ensure 'Enable reporting of usage and crash-related data' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['chromecleanupreport']['chromecleanupreportingenabled']) == 0):
Stat30 = "No need to change Control: ChromeCleanupReportingEnabled \n"
count = count + 1
else:
Stat30 = "Setting 'ChromeCleanupReportingEnabled' requires change: 3.6 (L1) Ensure 'Control how Chrome Cleanup reports data to Google' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['BrowserSignin']['browsersignin']) >= 1):
Stat31 = "No need to change Control: BrowserSignin \n"
count = count + 1
else:
Stat31 = "Setting 'BrowserSignin' requires change: 3.7 (L1) Ensure 'Browser sign in settings' is set to 'Enabled' with 'Disabled browser sign-in' specified \n"
count2 = count2 + 1
if(int(config['TranslateEnabled']['translateenabled']) == 0):
Stat32 = "No need to change Control: TranslateEnabled \n"
count = count + 1
else:
Stat32 = "Setting 'TranslateEnabled' requires change: 3.8 (L1) Ensure 'Enable Translate' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['NetworkPredictionOptions']['networkpredictionoptions']) >= 1):
Stat33 = "No need to change Control: NetworkPredictionOptions \n"
count = count + 1
else:
Stat33 = "Setting 'NetworkPredictionOptions' requires change: 3.9 (L1) Ensure 'Enable network prediction' is set to 'Enabled' with 'Do not predict actions on any network connection' selected \n"
count2 = count2 + 1
if(int(config['SearchSuggestEnabled']['searchsuggestenabled']) == 0):
Stat34 = "No need to change Control: SearchSuggestEnabled \n"
count = count + 1
else:
Stat34 = "Setting 'SearchSuggestEnabled' requires change: 3.10 (L1) Ensure 'Enable search suggestions' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['SpellCheckServiceEnabled']['spellcheckserviceenabled']) == 0):
Stat35 = "No need to change Control: SpellCheckServiceEnabled \n"
count = count + 1
else:
Stat35 = "Setting 'SpellCheckServiceEnabled' requires change: 3.11 (L1) Ensure 'Enable or disable spell checking web service' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['AlternateErrorPagesEnabled']['alternateerrorpagesenabled']) == 0):
Stat36 = "No need to change Control: AlternateErrorPagesEnabled \n"
count = count + 1
else:
Stat36 = "Setting 'AlternateErrorPagesEnabled' requires change: 3.12 (L1) Ensure 'Enable alternate error pages' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['SyncDisabled']['syncdisabled']) == 1):
Stat37 = "No need to change Control: SyncDisabled \n"
count = count + 1
else:
Stat37 = "Setting 'SyncDisabled' requires change: 3.13 (L1) Ensure 'Disable synchronization of data with Google' is set to 'Enabled' \n"
count2 = count2 + 1
if(int(config['safebrowsingtrustedsource']['safebrowsingfortrustedsourcesenabled']) == 0):
Stat38 = "No need to change Control: SafeBrowsingForTrustedSourcesEnabled \n"
count = count + 1
else:
Stat38 = "Setting 'SafeBrowsingForTrustedSourcesEnabled' requires change: 3.14 (L1) Ensure 'Enable Safe Browsing for trusted sources' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['urlkeyeddatacollect']['urlkeyedanonymizeddatacollectionenabled']) == 0):
Stat39 = "No need to change Control: UrlKeyedAnonymizedDataCollectionEnabled \n"
count = count + 1
else:
Stat39 = "Setting 'UrlKeyedAnonymizedDataCollectionEnabled' requires change: 3.15 (L1) Ensure 'Enable URL-keyed anonymized data collection' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['allowdeletebrowserhistory']['allowdeletingbrowserhistory']) == 0):
Stat40 = "No need to change Control: AllowDeletingBrowserHistory \n"
count = count + 1
else:
Stat40 = "Setting 'AllowDeletingBrowserHistory' requires change: 3.16 (L1) Ensure 'Enable deleting browser and download history' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['remoteaccessfirewalltraverse']['remoteaccesshostfirewalltraversal']) == 0):
Stat41 = "No need to change Control: RemoteAccessHostFirewallTraversal \n"
count = count + 1
else:
Stat41 = "Setting 'RemoteAccessHostFirewallTraversal' requires change: 4.1.1 (L1) Ensure 'Enable firewall traversal from remote access host' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['remoteaccessclientpair']['remoteaccesshostallowclientpairing']) == 0):
Stat42 = "No need to change Control: RemoteAccessHostAllowClientPairing \n"
count = count + 1
else:
Stat42 = "Setting 'RemoteAccessHostAllowClientPairing' requires change: 4.1.2 (L1) Ensure 'Enable or disable PIN-less authentication for remote access hosts' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['remoteaccessrelayconnect']['remoteaccesshostallowrelayedconnection']) == 0):
Stat43 = "No need to change Control: RemoteAccessHostAllowRelayedConnection \n"
count = count + 1
else:
Stat43 = "Setting 'RemoteAccessHostAllowRelayedConnection' requires change: 4.1.3 (L1) Ensure 'Enable the use of relay servers by the remote access host' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['CloudPrintSubmitEnabled']['cloudprintsubmitenabled']) == 0):
Stat44 = "No need to change Control: CloudPrintSubmitEnabled \n"
count = count + 1
else:
Stat44 = "Setting 'CloudPrintSubmitEnabled' requires change: 5.1 (L1) Ensure 'Enable submission of documents to Google Cloud print' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['ImportSavedPasswords']['importsavedpasswords']) == 0):
Stat45 = "No need to change Control: ImportSavedPasswords \n"
count = count + 1
else:
Stat45 = "Setting 'ImportSavedPasswords' requires change: 5.2 (L1) Ensure 'Import saved passwords from default browser on first run' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['AutofillCreditCardEnabled']['autofillcreditcardenabled']) == 0):
Stat46 = "No need to change Control: AutofillCreditCardEnabled \n"
count = count + 1
else:
Stat46 = "Setting 'AutofillCreditCardEnabled' requires change: 5.3 (L1) Ensure 'Enable AutoFill for credit cards' is set to 'Disabled' \n"
count2 = count2 + 1
if(int(config['AutofillAddressEnabled']['autofilladdressenabled']) == 0):
Stat47 = "No need to change Control: AutofillAddressEnabled \n"
count = count + 1
else:
Stat47 = "Setting 'AutofillAddressEnabled' requires change: 5.4 (L1) Ensure 'Enable AutoFill for addresses' is set to 'Disabled' \n"
count2 = count2 + 1
#print(config.sections())
print("\n")
print("============================================================== \n")
listbox3.insert(0, "Writing to " + timestr + " in program folder.")
listbox3.insert(1, " ")
listbox3.insert(2, "Browser Security + Remediations")
listbox3.insert(3, " ")
listbox3.insert(4, Stat1)
listbox3.insert(4, Stat2)
listbox3.insert(4, Stat3)
listbox3.insert(4, Stat4)
listbox3.insert(4, Stat5)
listbox3.insert(4, Stat6)
listbox3.insert(4, Stat7)
listbox3.insert(4, Stat8)
listbox3.insert(4, Stat9)
listbox3.insert(4, Stat10)
listbox3.insert(4, Stat11)
listbox3.insert(4, Stat12)
listbox3.insert(4, Stat13)
listbox3.insert(4, Stat14)
listbox3.insert(4, Stat15)
listbox3.insert(4, Stat16)
listbox3.insert(4, Stat17)
listbox3.insert(4, Stat18)
listbox3.insert(4, Stat19)
listbox3.insert(4, Stat20)
listbox3.insert(4, Stat21)
listbox3.insert(4, Stat22)
listbox3.insert(4, Stat23)
listbox3.insert(4, Stat24)
listbox3.insert(4, Stat25)
listbox3.insert(4, Stat26)
listbox3.insert(4, Stat27)
listbox3.insert(4, Stat28)
listbox3.insert(4, Stat29)
listbox3.insert(4, Stat30)
listbox3.insert(4, Stat31)
listbox3.insert(4, Stat32)
listbox3.insert(4, Stat33)
listbox3.insert(4, Stat34)
listbox3.insert(4, Stat35)
listbox3.insert(4, Stat36)
listbox3.insert(4, Stat37)
listbox3.insert(4, Stat38)
listbox3.insert(4, Stat39)
listbox3.insert(4, Stat40)
listbox3.insert(4, Stat41)
listbox3.insert(4, Stat42)
listbox3.insert(4, Stat43)
listbox3.insert(4, Stat44)
listbox3.insert(4, Stat45)
listbox3.insert(4, Stat46)
listbox3.insert(4, Stat47)
listbox4.insert(0, "\nNumber of Compliant controls")
listbox4.insert(1, "--> " + str(count))
listbox4.insert(2, "Number of Non-Compliant controls")
listbox4.insert(3, "--> " + str(count2))
# ==== GUI ====
gui = Tk()
gui.title('IT Risk Audit Baseline Analyzer')
gui.geometry("1200x500+20+20")
tabControl = ttk.Notebook(gui)
tab1 = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tabControl.add(tab1, text ='Windows 10/Server')
tabControl.add(tab2, text ='Browsers')
tabControl.pack(expand = 1, fill ="both")
# ==== Colors ====
m1c = '#00ee00'
bgc = '#222222'
dbg = '#000000'
fgc = '#111111'
gui.tk_setPalette(background="white", foreground="Black",)
# ==== Labels ====
L11 = Label(tab1, text = "Windows Audit", font=("Helvetica", 16, 'underline', 'bold'))
L11.place(x = 16, y = 10)
textinput1 = Label(tab1, text="Target IP:")
textinput1.place (x = 220, y = 15)
textinput2 = Label(tab1, text="Target domain:")
textinput2.place (x = 460, y = 15)
textinput3 = Label(tab1, text="Target User:")
textinput3.place (x = 220, y = 45)
textinput4 = Label(tab1, text="User Password:")
textinput4.place (x = 460, y = 45)
L26 = Label(tab1, text = "Results: ")
L26.place(x = 16, y = 60)
L27 = Label(tab1, text = "[ ... ]")
L27.place(x = 80, y = 60)
L11 = Label(tab2, text = "Win-Browser Audit", font=("Helvetica", 16, 'underline', 'bold'))
L11.place(x = 16, y = 10)
textinput5 = Label(tab2, text="Target IP:")
textinput5.place (x = 220, y = 15)
textinput6 = Label(tab2, text="Target domain:")
textinput6.place (x = 460, y = 15)
textinput7 = Label(tab2, text="Target User:")
textinput7.place (x = 220, y = 45)
textinput8 = Label(tab2, text="User Password:")
textinput8.place (x = 460, y = 45)
L26 = Label(tab2, text = "Results: ")
L26.place(x = 16, y = 60)
L27 = Label(tab2, text = "[ ... ]")
L27.place(x = 80, y = 60)
# ==== Buttons / Scans ====
L26 = Label(tab1, text = "Scan Options:", font=("Helvetica", 16, 'underline', 'bold'))
L26.place(x = 16, y = 220)
IP = Entry(tab1)
IP.place(x = 280, y = 15)
Domain = Entry(tab1)
Domain.place(x = 550, y = 15)
username = Entry(tab1)
username.place(x = 290, y = 45)
passwd = Entry(tab1, show = '*')
passwd.place(x = 550, y = 45)
B11 = Button(tab1, text = "Basic Scan", command=basic, fg='black')
B11.place(x = 16, y = 270, width = 150, height = 40)
B12 = Button(tab1, text = "Intermediate Scan", command=startScan_Intermediate, fg='black')
B12.place(x = 16, y = 340, width = 150, height = 40)
B21 = Button(tab1, text = "Save Result", command=saveScan, fg='black')
B21.place(x = 200, y = 260, width = 200, height=65)
B21 = Button(tab1, text = "Clear Result", command=deleteScan, fg='black')
B21.place(x = 200, y = 330, width = 200, height=65)
L26 = Label(tab2, text = "Scan Options:", font=("Helvetica", 16, 'underline', 'bold'))
L26.place(x = 16, y = 220)
IP2 = Entry(tab2)
IP2.place(x = 280, y = 15)
Domain2 = Entry(tab2)
Domain2.place(x = 550, y = 15)
username2 = Entry(tab2)
username2.place(x = 290, y = 45)
passwd2 = Entry(tab2, show = '*')
passwd2.place(x = 550, y = 45)
B11 = Button(tab2, text = "Google Chrome Scan", command=googlescan, fg='black')
B11.place(x = 16, y = 270, width = 150, height = 40)
B21 = Button(tab2, text = "Save Result", command=saveScan2, fg='black')
B21.place(x = 200, y = 260, width = 200, height=65)
B21 = Button(tab2, text = "Clear Result", command=deleteScan2, fg='black')
B21.place(x = 200, y = 330, width = 200, height=65)
# ==== Result list ====
frame = Frame(tab1)
frame.place(x = 10, y = 100, width = 1100, height = 100)
listbox = Listbox(frame, width = 1100, height = 6)
listbox.place(x = 0, y = 0)
listbox.bind('<<ListboxSelect>>')
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
listbox.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=listbox.yview)
L1 = Label(tab1, text = "Summary of results:", font=("Helvetica", 16, 'underline', 'bold'))
L1.place(x = 430, y = 250)
frame = Frame(tab1)
frame.place(x = 430, y = 295, width = 260, height = 100)
listbox2 = Listbox(frame, width = 100, height = 8)
listbox2.place(x = 0, y = 0)
listbox2.bind('<<ListboxSelect>>')
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
listbox2.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=listbox2.yview)
frame = Frame(tab2)
frame.place(x = 10, y = 100, width = 1100, height = 100)
listbox3 = Listbox(frame, width = 1100, height = 6)
listbox3.place(x = 0, y = 0)
listbox3.bind('<<ListboxSelect>>')
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
listbox3.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=listbox3.yview)
L1 = Label(tab2, text = "Summary of results:", font=("Helvetica", 16, 'underline', 'bold'))
L1.place(x = 430, y = 250)
frame = Frame(tab2)
frame.place(x = 430, y = 295, width = 260, height = 100)
listbox4 = Listbox(frame, width = 100, height = 8)
listbox4.place(x = 0, y = 0)
listbox4.bind('<<ListboxSelect>>')
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
listbox4.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=listbox4.yview)
# ==== Start GUI ====
gui.mainloop()
| UTF-8 | Python | false | false | 178,932 | py | 3 | Windows_GUI.py | 2 | 0.520321 | 0.506349 | 0.000011 | 3,693 | 46.451665 | 283 |
mwz73trz/information_and_notes | 16,243,566,332,716 | 66277f82f1bf47b0d90840ad0648507d243ea5e5 | c52c526fb5a05fffea5d4a9c7ede97694057c1e8 | /backend/backend_app/serializers.py | 479fa6faaa94897a0b9cac7ee371650b9a1f8740 | []
| no_license | https://github.com/mwz73trz/information_and_notes | 7e00d0f03161849d9e25425e4fb79cf1f8ce4b42 | c15b4d2ac7910a3320d3ee61f3a6e668e77b8906 | refs/heads/main | 2023-08-04T07:10:00.390914 | 2021-09-16T17:27:29 | 2021-09-16T17:27:29 | 406,961,634 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework.serializers import ModelSerializer, StringRelatedField
from backend_app.models import Subject, Title
class SubjectSerializer(ModelSerializer):
class Meta:
model = Subject
fields = ['id', 'name', 'user', 'titles']
depth = 1
user = StringRelatedField()
class TitleSerializer(ModelSerializer):
class Meta:
model = Title
fields = '__all__' | UTF-8 | Python | false | false | 410 | py | 15 | serializers.py | 13 | 0.673171 | 0.670732 | 0 | 15 | 26.4 | 74 |
mwisdom04/one_second_centre_port_hold | 15,796,889,733,087 | 3d32663899f9de8d59d774a265aa994123a3ea41 | 3356823028e4d78187c24b5de9e6c97668a743fb | /load_bpod_raw_events.py | a418791e557989fffa602be012dd35c1f6df81e2 | []
| no_license | https://github.com/mwisdom04/one_second_centre_port_hold | 60af622896ff6672365907fadd72635b6df24163 | 3733d7986888f331673e24cfd1076816bf867086 | refs/heads/main | 2023-07-17T23:33:06.977378 | 2021-09-07T15:24:34 | 2021-09-07T15:24:34 | 370,728,599 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import load_nested_structs as load_ns
def load_bpod_file(main_session_file):
# gets the Bpod data out of MATLAB struct and into python-friendly format
loaded_bpod_file = load_ns.loadmat(main_session_file)
# as RawEvents.Trial is a cell array of structs in MATLAB, we have to loop through the array and convert the structs to dicts
trial_raw_events = loaded_bpod_file['SessionData']['RawEvents']['Trial']
for trial_num, trial in enumerate(trial_raw_events):
trial_raw_events[trial_num] = load_ns._todict(trial)
loaded_bpod_file['SessionData']['RawEvents']['Trial'] = trial_raw_events
return loaded_bpod_file, trial_raw_events | UTF-8 | Python | false | false | 661 | py | 11 | load_bpod_raw_events.py | 10 | 0.727685 | 0.727685 | 0 | 11 | 59.181818 | 129 |
QiuhuaL/tensorflow | 8,169,027,797,455 | 46d884815aae5536325defc276fd453d169deb13 | f8e8495441c456b2a768c93c34c830b64e69c368 | /tensorflow/python/estimator/training_test.py | 4e67d4577195c31d753d6c5917fd83d14f5f0b5a | [
"Apache-2.0"
]
| permissive | https://github.com/QiuhuaL/tensorflow | 3434aca36f66ccf33271d55c47c6ee5b9c2f4585 | 3bc73f5e2ac437b1d9d559751af789c8c965a7f9 | refs/heads/master | 2021-06-27T19:48:50.506012 | 2017-09-12T22:57:35 | 2017-09-12T22:57:35 | 103,412,413 | 1 | 0 | null | true | 2017-09-13T14:48:38 | 2017-09-13T14:48:38 | 2017-09-13T14:30:08 | 2017-09-13T11:46:23 | 124,489 | 0 | 0 | 0 | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import training
from tensorflow.python.platform import test
from tensorflow.python.training import session_run_hook
_DEFAULT_EVAL_STEPS = 100
_DEFAULT_EVAL_DELAY_SECS = 120
_DEFAULT_EVAL_THROTTLE_SECS = 600
_INVALID_INPUT_FN_MSG = '`input_fn` must be callable'
_INVALID_HOOK_MSG = 'All hooks must be `SessionRunHook` instances'
_INVALID_MAX_STEPS_MSG = 'Must specify max_steps > 0'
_INVALID_STEPS_MSG = 'Must specify steps > 0'
_INVALID_NAME_MSG = '`name` must be string'
_INVALID_EVAL_DELAY_SECS_MSG = 'Must specify delay_secs >= 0'
_INVALID_EVAL_THROTTLE_SECS_MSG = 'Must specify throttle_secs >= 0'
_INVALID_ESTIMATOR_MSG = '`estimator` must have type `tf.estimator.Estimator`'
_INVALID_TRAIN_SPEC_MSG = '`train_spec` must have type `tf.estimator.TrainSpec`'
_INVALID_EVAL_SPEC_MSG = '`eval_spec` must have type `tf.estimator.EvalSpec`'
class _FakeHook(session_run_hook.SessionRunHook):
"""Fake implementation of `SessionRunHook`."""
class _InvalidHook(object):
"""Invalid hook (not a subclass of `SessionRunHook`)."""
class TrainSpecTest(test.TestCase):
"""Tests TrainSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.TrainSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertIsNone(spec.max_steps)
self.assertEqual(0, len(spec.hooks))
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
spec = training.TrainSpec(input_fn=lambda: 1, max_steps=2, hooks=hooks)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.max_steps)
self.assertEqual(tuple(hooks), spec.hooks)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.TrainSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_MAX_STEPS_MSG):
training.TrainSpec(input_fn=lambda: 1, max_steps=0)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.TrainSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
class EvalSpecTest(test.TestCase):
"""Tests EvalSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.EvalSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertEqual(_DEFAULT_EVAL_STEPS, spec.steps)
self.assertIsNone(spec.name)
self.assertEqual(0, len(spec.hooks))
self.assertEqual(0, len(spec.export_strategies))
self.assertEqual(_DEFAULT_EVAL_DELAY_SECS, spec.delay_secs)
self.assertEqual(_DEFAULT_EVAL_THROTTLE_SECS, spec.throttle_secs)
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
# TODO(b/65169058): Replace the export_strategies with valid instances.
spec = training.EvalSpec(input_fn=lambda: 1, steps=2, name='name',
hooks=hooks, export_strategies=hooks,
delay_secs=3, throttle_secs=4)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.steps)
self.assertEqual('name', spec.name)
self.assertEqual(tuple(hooks), spec.hooks)
self.assertEqual(tuple(hooks), spec.export_strategies)
self.assertEqual(3, spec.delay_secs)
self.assertEqual(4, spec.throttle_secs)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.EvalSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_STEPS_MSG):
training.EvalSpec(input_fn=lambda: 1, steps=0)
def testInvalidName(self):
with self.assertRaisesRegexp(TypeError, _INVALID_NAME_MSG):
training.EvalSpec(input_fn=lambda: 1, name=123)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.EvalSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
def testInvalidDelaySecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_DELAY_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, delay_secs=-1)
def testInvalidThrottleSecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_THROTTLE_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, throttle_secs=-1)
class TrainingExecutorTest(test.TestCase):
"""Tests _TrainingExecutor."""
def testRequiredArgumentsSet(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
executor = training._TrainingExecutor(estimator, train_spec, eval_spec)
self.assertEqual(estimator, executor.estimator)
def test_invalid_estimator(self):
invalid_estimator = object()
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
training._TrainingExecutor(invalid_estimator, train_spec, eval_spec)
def test_invalid_train_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
invalid_train_spec = object()
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_TRAIN_SPEC_MSG):
training._TrainingExecutor(estimator, invalid_train_spec, eval_spec)
def test_invalid_eval_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
invalid_eval_spec = object()
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
training._TrainingExecutor(estimator, train_spec, invalid_eval_spec)
if __name__ == '__main__':
test.main()
| UTF-8 | Python | false | false | 6,863 | py | 3 | training_test.py | 3 | 0.718199 | 0.708291 | 0 | 172 | 38.901163 | 80 |
Yodart/banky | 3,693,671,885,443 | 5643783b4cf6cfd0834da30717c5237ed9e346e6 | 29c6c54ab679fb2462e29212c75066e9d1940501 | /deposits.py | 5393d02735d24a2839f279f5a3df7174e6309351 | [
"MIT"
]
| permissive | https://github.com/Yodart/banky | 3e943c767ecb354fedba9a089d16225aabcb552d | 604c37ab80d95bb9f81d91534df512b20df5cd10 | refs/heads/master | 2022-12-16T05:10:39.152859 | 2020-09-14T03:44:17 | 2020-09-14T03:44:17 | 295,042,203 | 1 | 0 | null | false | 2020-09-14T03:44:18 | 2020-09-12T23:17:51 | 2020-09-14T03:43:48 | 2020-09-14T03:44:17 | 4,531 | 0 | 0 | 0 | Python | false | false | from flask import Flask, Blueprint, request, jsonify, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from db import db_connect
from auth import require_auth_token
import datetime
import jwt
import psycopg2
import sys
deposits = Blueprint('deposits', __name__)
@ deposits.route('/deposit', methods=['POST'])
@db_connect
@require_auth_token
def create_deposit(current_account, db_cursor, db_connection):
account_number = request.json['account_number']
ammount = request.json['ammount']
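    # Record the deposit row and credit the account balance in a single commit.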
try:
db_cursor.execute(
"INSERT INTO deposits (ammount,account_number) values(%s,%s)", (ammount, account_number))
db_cursor.execute(
"UPDATE accounts SET balance = balance + %s WHERE account_number = %s", (ammount, account_number))
db_connection.commit()
        return jsonify({'message': 'Amount deposited!'}), 200
except:
return {'error': "Unable to make deposit", "traceback": str(sys.exc_info())}, 401
@ deposits.route('/deposit/<int:deposit_id>', methods=['GET'])
@db_connect
@require_auth_token
def query_single_deposit(current_account, db_cursor, db_connection, deposit_id):
try:
db_cursor.execute(
"SELECT id,account_number,ammount,timestamp FROM deposits WHERE id=%s AND account_number = %s LIMIT 1", (deposit_id, current_account['account_number']))
account_data = db_cursor.fetchall()[0]
return {'id': account_data[0],
'account_number': account_data[1],
'ammount': account_data[2],
'timestamp': account_data[3]}, 200
except:
return {'error': "Unable to fetch /deposit/<id>", "traceback": str(sys.exc_info())}, 401
@ deposits.route('/deposits/<int:acc_number>', methods=['GET'])
@db_connect
@require_auth_token
def query_deposits(current_account, db_cursor, db_connection, acc_number):
if current_account['account_number'] != acc_number:
return jsonify({"error": "Sensity user data, please log into the account"}), 401
limit = request.args.get('limit')
offset = request.args.get('offset')
try:
db_cursor.execute(
"SELECT id,account_number,ammount,timestamp FROM deposits WHERE account_number = %s LIMIT %s OFFSET %s", (acc_number, limit, offset))
deposits = []
for deposit in db_cursor.fetchall():
deposits.append({'id': deposit[0],
'account_number': deposit[1],
'ammount': deposit[2],
'timestamp': deposit[3]})
return jsonify({'deposits': deposits}), 200
except:
return {'error': "Unable to fetch /deposits/<acc_number>", "traceback": str(sys.exc_info())}, 401
| UTF-8 | Python | false | false | 2,758 | py | 8 | deposits.py | 8 | 0.637056 | 0.625453 | 0 | 65 | 41.430769 | 164 |
vajjhala/ChessBot | 7,679,401,552,448 | 401cd8330665b0e7cfc05952b0c61fb705134614 | f26f67858e8bed1c7c3412c81ac11b2a584dddbe | /read_data.py | 3f6c34f32c67ee1dd0af5ab1f40e19421fdf0fd7 | []
| no_license | https://github.com/vajjhala/ChessBot | fbc786cacc2d7c022ba90450d9c6e6443f64b3de | 667e72fbeab03f9cc1571a1144b9f08e46859acd | refs/heads/master | 2021-01-22T19:36:36.367280 | 2018-10-24T15:14:04 | 2018-10-24T15:14:04 | 85,220,603 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import copy
import numpy as np
import random
import gzip
import math
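# Wraps a generator function so the resulting iterable can be restarted:
# iterating it again rebuilds the generator from the saved function and arguments.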
class GeneratorRestartHandler(object):
def __init__(self, gen_func, argv, kwargv):
self.gen_func = gen_func
self.argv = copy.copy(argv)
self.kwargv = copy.copy(kwargv)
self.local_copy = self.gen_func(*self.argv, **self.kwargv)
def __iter__(self):
return GeneratorRestartHandler(self.gen_func, self.argv, self.kwargv)
def __next__(self):
return next(self.local_copy)
def next(self):
return self.__next__()
def restartable(g_func):
def tmp(*argv, **kwargv):
return GeneratorRestartHandler(g_func, argv, kwargv)
return tmp
with gzip.GzipFile('train_wins.npy.gz', "r") as f:
wins_train = np.load(f)
with gzip.GzipFile('train_loses.npy.gz', "r") as f:
loses_train = np.load(f)
with gzip.GzipFile('cross_validation_wins.npy.gz', "r") as f:
wins_cross = np.load(f)
with gzip.GzipFile('cross_validation_loses.npy.gz', "r") as f:
loses_cross = np.load(f)
@restartable
def auto_encoder_gen(batch_size):
'''
    Choose a random set of one million wins and one million losses for White and
    yield the positions without any information about the result, for feature
    extraction.
    Input: batch_size, the number of positions per training iteration of the
    autoencoder.
'''
list_a = np.random.choice(wins_train.shape[0], 1000000, replace=False )
list_b = np.random.choice(loses_train.shape[0],1000000, replace=False )
    L_ = loses_train[list_b]  # one million losses
    W_ = wins_train[list_a]  # one million wins
# Join both and get a random shuffles set of 2 million instances
X = np.concatenate((L_,W_), axis=0)
randomize = np.arange( X.shape[0] )
np.random.shuffle(randomize)
# Ignoring the last coloumn which pertains to the result
X1 = X[randomize][:,:773]
data_len = X1.shape[0]
# Batch size for each training iteration
for slice_i in range(int(math.ceil(data_len / batch_size))):
idx = slice_i * batch_size
X_batch = X1[idx:idx + batch_size]
yield (X_batch.astype(np.int32))
@restartable
def siemese_generator(batch_size, data_type ):
assert data_type in ['cross_validation', 'train']
    ''' Training data generation:
    1) Pick one million random instances each from White wins and losses
    2) Concatenate them to create a matrix of 2 million instances
    3) Shuffle it to create (W,L) or (L,W) pairs
    4) Separate the matrix into two parts
    Send these two parts to the two branches of the siamese network as X1, X2.
    Minimize the loss against Y, which is (1,0) or (0,1) and indicates which
    branch of the siamese network was given the winning position.
'''
# training data
if data_type =='train':
list_a = np.random.choice(wins_train.shape[0], 1000000, replace=False )
list_b = np.random.choice(loses_train.shape[0],1000000, replace=False )
index = math.ceil( len(list_a) / 2 )
W1, W2 = wins_train[list_a][:index], wins_train[list_a][index:]
L1, L2 = loses_train[list_b][:index], loses_train[list_b][index:]
# Cross validation data :
    # A set of 97,000 win and 97,000 loss instances against which
    # the model's accuracy will be compared
else:
list_a = np.random.choice(wins_cross.shape[0], 97000, replace=False )
list_b = np.random.choice(loses_cross.shape[0],97000, replace=False )
index = math.ceil( len(list_a) / 2 )
W1, W2 = wins_cross[list_a][:index], wins_cross[list_a][index:]
L1, L2 = loses_cross[list_b][:index], loses_cross[list_b][index:]
X_1 = np.concatenate((L1,W2), axis=0)
X_2 = np.concatenate((W1,L2), axis=0)
assert X_1.shape[0] == X_2.shape[0]
randomize = np.arange( X_1.shape[0] )
np.random.shuffle(randomize)
X1 = X_1[randomize][:,:773]
X2 = X_2[randomize] [:,:773]
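    # Labels pair the stored results of the two branches, giving (1,0) or (0,1)
    # depending on which branch received the winning position.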
Y = np.array(list(zip(X_1[randomize][:,-1], X_2[randomize][:,-1])))
    # Mini-batches for gradient descent
data_len = Y.shape[0]
for slice_i in range(int(math.ceil(data_len / batch_size))):
idx = slice_i * batch_size
X1_batch = X1[idx:idx + batch_size]
X2_batch = X2[idx:idx + batch_size]
Y_batch = Y[idx:idx + batch_size]
yield (X1_batch.astype(np.int32), X2_batch.astype(np.int32),Y_batch.astype(np.int32))
| UTF-8 | Python | false | false | 4,602 | py | 12 | read_data.py | 9 | 0.608648 | 0.581921 | 0 | 142 | 31.401408 | 93 |
techtronics/ottomen | 5,789,615,960,257 | b6e90e91aeeb29c6fb5f3755d11099314dd10bb2 | 7aeef925467347597c5afb041bf7c8b5b248807f | /tests/resources/task_test.py | 0edd5fe1b6b760261e3f9c845e5bfbab661a1da9 | []
| no_license | https://github.com/techtronics/ottomen | e9c59a54c0f1ebe56195e306efd6e2f1488ab424 | 9cf5b8282f2349798b0b4d08626111f564d89f8c | refs/heads/master | 2017-12-21T21:44:20.232246 | 2015-09-21T16:53:47 | 2015-09-21T16:53:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import OttomenResourceTestCase
from ottomen.resources.services import *
from werkzeug.exceptions import NotFound
import sure
import pytest
from .helpers import create_task, create_experiment
class TaskResourceTestCase(OttomenResourceTestCase):
def test_db_create(self):
"""
        It should be able to insert a task in the database and successfully retrieve it
"""
task_db = create_task()
task_db.id.should.be.greater_than(0)
tasks.find(id=task_db.id).shouldnt.be(None)
tasks.get(id=task_db.id).shouldnt.be(None)
def test_db_delete(self):
"""
        It should be able to delete a task from the database
"""
task_db = create_task()
to_delete = tasks.delete(task_db)
to_delete.should.be(None)
deleted = tasks.get(id=task_db.id)
deleted.should.be(None)
def test_db_update(self):
task_db = create_task()
new_experiment = create_experiment()
updated = tasks.update(task_db, experiment=new_experiment)
updated.experiment.id.should.be.equal(new_experiment.id)
def test_malformed_model(self):
tasks.new.when.called_with(description="A shitty description", accuracy=.7, not_there=5).should.throw(TypeError)
def test_404(self):
tasks.get_or_404.when.called_with('10000000').should.throw(NotFound)
def test_new_mem(self):
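        """The in-memory copy should mirror every persisted field of the task."""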
task_db = create_task()
task_mem = tasks.new_mem(task_db).get()
task_mem['id'].should.be.equal(task_db.id)
task_mem['experiment_id'].should.be.equal(task_db.experiment_id)
task_mem['batch_size'].should.be.equal(task_db.batch_size)
task_mem['nr_of_batches'].should.be.equal(task_db.nr_of_batches)
task_mem['size'].should.be.equal(task_db.size)
task_mem['initial_consensus'].should.be.equal(task_db.initial_consensus)
task_mem['returning_consensus'].should.be.equal(task_db.returning_consensus)
task_mem['minimum_mt_score'].should.be.equal(task_db.minimum_mt_score)
task_mem['minimum_mt_submissions'].should.be.equal(task_db.minimum_mt_submissions)
task_mem['reward'].should.be.equal(task_db.reward)
task_mem['title'].should.be.equal(task_db.title)
task_mem['description'].should.be.equal(task_db.description)
task_mem['url'].should.be.equal(task_db.url)
tasks.get_mem_obj(task_db.id).id.should.be.equal(task_db.id)
def test_get_non_existing_mem(self):
tasks.get_mem_obj.when.called_with('not_there').should.throw(KeyError)
def test_new_mem_malformed_model(self):
tasks.new_mem.when.called_with({'not_there': True}).should.throw(TypeError)
def test_update_mem(self):
task_db = create_task()
task_mem = tasks.new_mem(task_db).get()
new_id = 'new_shitty_id'
task_mem['id'] = new_id
tasks.update_mem(task_mem)
tasks.get_mem_obj(new_id).id.should.be.equal(new_id)
def test_update_mem_malformed_model(self):
task_db = create_task()
task_mem = tasks.new_mem(task_db).get()
task_mem['key_that_doesnt_exist_in_model'] = 'new_shitty_value'
tasks.update_mem.when.called_with(task_mem).should.throw(TypeError)
# def test_access_experiment(self):
# task_db = create_task()
# exp_mem = experiments.new_mem(task_db.experiment)
# task_mem = tasks.new_mem(task_db)
# int(task_mem.experiment()['id']).should.be.equal(task_db.experiment_id)
| UTF-8 | Python | false | false | 3,511 | py | 119 | task_test.py | 97 | 0.650812 | 0.64597 | 0 | 85 | 40.305882 | 120 |
SimonOkello/carr_app | 1,743,756,737,338 | eed38ff7ccc871cad61bf29f2dd76f66a801bee3 | d4b79b3f61aca1b9dd85312fbba8529fbe1f287d | /src/cars/urls.py | 91d8b7fde89533a195fe36e015d416c786e8195f | []
| no_license | https://github.com/SimonOkello/carr_app | f53e111d0599e3909b5abaf49af0df33703d4fd2 | 776947fc17861527504368405e0df3080cf6b02c | refs/heads/master | 2022-04-07T11:37:33.481236 | 2020-03-06T12:48:53 | 2020-03-06T12:48:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import upload_csv, custom_upload_csv
urlpatterns = [
path("import/", upload_csv),
path("custom_import/", custom_upload_csv)
]
| UTF-8 | Python | false | false | 177 | py | 16 | urls.py | 11 | 0.700565 | 0.700565 | 0 | 8 | 21.125 | 48 |
bellick/Algorithm | 10,617,159,183,466 | a8f934f9c87bab5c943abe2395b9a4787e8e7229 | ddcffe85ff80fec7b92ccc0a77acdd29406294d5 | /Sword2Offer/isNumeric.py | aa1db81ed32be405d536cddf728c7c92ca8ae81b | []
| no_license | https://github.com/bellick/Algorithm | 6b9296168467d7f299bd8fa589b6d7d90683364b | 583d2c810f2b7c9f0b776a5e635d0ca768346382 | refs/heads/master | 2020-05-05T10:50:48.396442 | 2019-06-14T01:08:11 | 2019-06-14T01:08:11 | 179,963,485 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
"""
Implement a function that determines whether a string represents a numeric
value (including integers and decimals). For example, "+100", "5e2", "-123",
"3.1416" and "-1E-16" all represent numeric values, while "12e", "1a3.14",
"1.2.3", "+-5" and "12e+4.3" do not.
"""
import re

# Optional sign, then digits with an optional fractional part (or a bare
# leading dot), then an optional exponent that must carry integer digits.
_NUMERIC_RE = re.compile(r'^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$')


class Solution:
    # s: the candidate string
    def isNumeric(self, s):
        return _NUMERIC_RE.match(s) is not None


s = Solution()
print(s.isNumeric("-1E-16"))
print(s.isNumeric("-1E-16")) | UTF-8 | Python | false | false | 563 | py | 65 | isNumeric.py | 24 | 0.478652 | 0.373034 | 0 | 20 | 21.3 | 55 |