Dataset schema (one row per source file; string columns report min–max lengths, numeric columns min–max values, class columns the number of distinct values):

| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | string | lengths 40 – 40 |
| directory_id | string | lengths 40 – 40 |
| path | string | lengths 5 – 151 |
| content_id | string | lengths 40 – 40 |
| detected_licenses | list | n/a |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 – 130 |
| snapshot_id | string | lengths 40 – 40 |
| revision_id | string | lengths 40 – 40 |
| branch_name | string | lengths 4 – 42 |
| visit_date | timestamp[ns] | n/a |
| revision_date | timestamp[ns] | n/a |
| committer_date | timestamp[ns] | n/a |
| github_id | int64 | 14.6k – 687M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | n/a |
| gha_created_at | timestamp[ns] | n/a |
| gha_updated_at | timestamp[ns] | n/a |
| gha_pushed_at | timestamp[ns] | n/a |
| gha_size | int64 | 0 – 10.2M |
| gha_stargazers_count | int32 | 0 – 178k |
| gha_forks_count | int32 | 0 – 88.9k |
| gha_open_issues_count | int32 | 0 – 2.72k |
| gha_language | string | lengths 1 – 16 |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 – 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 – 202k |
| filename | string | lengths 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |
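The trailing columns (`length_bytes` through `max_line_length`) are simple functions of `content`. As a reading aid, here is a minimal sketch of how such per-file statistics could be computed; the dataset's actual extraction pipeline is not documented in this dump, so the exact definitions (whether whitespace counts toward the fractions, bytes vs. characters) are assumptions.

```python
# Hedged sketch: plausible definitions for the derived per-file columns.
# The pipeline that produced this dataset may differ in details
# (e.g. how whitespace or non-UTF-8 bytes are counted).

def file_stats(content: str) -> dict:
    n = len(content)
    lines = content.splitlines() or [""]
    return {
        "length_bytes": len(content.encode("utf-8")),
        "num_lines": len(lines),
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max(len(l) for l in lines),
        # character-class fractions over the raw text
        "alphanum_fraction": sum(c.isalnum() for c in content) / n if n else 0.0,
        "alpha_fraction": sum(c.isalpha() for c in content) / n if n else 0.0,
        "hex_fraction": sum(c in "0123456789abcdefABCDEF" for c in content) / n if n else 0.0,
    }

if __name__ == "__main__":
    print(file_stats("x = 0xFF  # one line\n"))
```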
---
repo_name: GafferHQ/gaffer · path: /python/GafferImageUI/ImageTransformUI.py
__id__: 11,338,713,680,519 · github_id: 9,043,124 · star_events_count: 707 · fork_events_count: 144
blob_id: 3e9336d5f12ac401379a720fea095a331efb41ef · directory_id: 6146e33102797407ede06ce2daa56c28fdfa2812
content_id: f2f7e56eddf5930961f93affe4387334f5df06d4 · snapshot_id: e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64 · revision_id: 59cab96598c59b90bee6d3fc1806492a5c03b4f1
detected_licenses: ["BSD-3-Clause"] · license_type: permissive · repo_url: https://github.com/GafferHQ/gaffer · branch_name: refs/heads/main
visit_date: 2023-09-01T17:36:45.227956 · revision_date: 2023-08-30T09:10:56 · committer_date: 2023-08-30T09:10:56
gha_license_id: BSD-3-Clause · gha_fork: false · gha_event_created_at: 2023-09-14T09:05:37 · gha_created_at: 2013-03-27T00:04:53 · gha_updated_at: 2023-09-13T21:09:28 · gha_pushed_at: 2023-09-14T09:05:36
gha_size: 74,700 · gha_stargazers_count: 880 · gha_forks_count: 196 · gha_open_issues_count: 333 · gha_language: Python · gha_archived: false · gha_disabled: false

content:
```python
##########################################################################
#
#  Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import itertools

import Gaffer
import GafferUI
import GafferImage

Gaffer.Metadata.registerNode(

	GafferImage.ImageTransform,

	"description",
	"""
	Scales, rotates and translates an image within its display window.
	Note that although the format is not changed, the data window is
	expanded to include the portions of the image which have been
	transformed outside of the display window, and these out-of-frame
	pixels can still be used by downstream nodes.
	""",

	plugs = {

		"transform" : [

			"description",
			"""
			The transformation to be applied to the image. The translate
			and pivot values are specified in pixels, and the rotate
			value is specified in degrees.
			""",

			"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",

		],

		"filter" : [

			"description",
			"""
			The pixel filter used when transforming the image. Each
			filter provides different tradeoffs between sharpness and
			the danger of aliasing or ringing.
			""",

			"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",

		] + list( itertools.chain(

			# Disk doesn't make much sense as a resizing filter, and also
			# causes artifacts because its default width is small enough
			# to fall into the gaps between pixels.
			*[ ( "preset:" + x.title(), x ) for x in GafferImage.FilterAlgo.filterNames() if x != "disk" ]

		) ),

		"invert" : [

			"description",
			"""
			Apply the inverse transformation to the image.
			""",

		],

		"concatenate" : [

			"description",
			"""
			Combines the processing for a series of ImageTransforms so
			that transformation and filtering is only applied once. This
			gives better image quality and performance.

			> Note : When concatenation is in effect, the filter settings
			> on upstream ImageTransforms are ignored.
			""",

			"layout:section", "Node",
			"layout:index", -1,

		],

	}

)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,646 · extension: py · filename: ImageTransformUI.py
num_repo_files: 1,694 · num_lang_files: 1,585 · num_lines: 117 · avg_line_length: 30.162393 · max_line_length: 97 · alphanum_fraction: 0.689248 · alpha_fraction: 0.687877 · hex_fraction: 0
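Records like the one above can be filtered on their metadata columns. A minimal sketch, assuming each record is available as a dict keyed by the schema's column names (the storage format and loader are not specified in this dump):

```python
# Hedged sketch: filtering rows of this dataset, assuming each record is a
# dict keyed by the column names in the schema table above.

def keep(row: dict) -> bool:
    return (
        row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["num_lines"] >= 5
        and row["alphanum_fraction"] > 0.25  # the schema minimum is 0.26
    )

rows = [...]  # load records from wherever this dump originated
python_files = [r for r in rows if keep(r)]
```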
---
repo_name: anncarln/start-python · path: /ex026.py
__id__: 2,723,009,278,879 · github_id: 282,767,791 · star_events_count: 0 · fork_events_count: 0
blob_id: 9ec81bb08e54a284ec60bf92994e105b4e6d6f99 · directory_id: e79e3974f877be62f96bb575bbe84c5fe70e68b7
content_id: 327e9c006144f5b725acb0ba826f49b57f4f6b08 · snapshot_id: 885ea5b8da8533035ab90edc1e7b52741421b76a · revision_id: 964f3ed54a2a05f3999bb30bdcd93dbdf816d0e0
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/anncarln/start-python · branch_name: refs/heads/master
visit_date: 2023-06-29T01:57:47.190743 · revision_date: 2021-08-08T03:18:31 · committer_date: 2021-08-08T03:18:31
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
frase = str(input('Digite a frase que você deseja analisar: ')).strip()
frase1 = frase.upper()
print(f"A letra A aparece {frase1.count('A')} vezes nessa frase")
print(f"A primeira letra A apareceu na posição {frase1.find('A')+1}")
print(f"A última letra A apareceu na posição {frase1.rfind('A')+1}")
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 312 · extension: py · filename: ex026.py
num_repo_files: 80 · num_lang_files: 80 · num_lines: 5 · avg_line_length: 59 · max_line_length: 71 · alphanum_fraction: 0.696078 · alpha_fraction: 0.676471 · hex_fraction: 0
---
repo_name: otacake/ML · path: /sec3/Iris_training_logistic.py
__id__: 755,914,283,856 · github_id: 274,142,870 · star_events_count: 0 · fork_events_count: 0
blob_id: 925f04089db99ab5cd4b5c1997cd1c15163a9991 · directory_id: 632a161c20cab566a63c5920a9e8b0f5635abee6
content_id: 1c9cfa781d8ee0d7b5ae53cfe07af08c5e9646e4 · snapshot_id: 70eb32c6bad13dedb16a2f75e205fe2fb39e49a4 · revision_id: d26651db97b33241e2b22c52b5be6341c17b23f1
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/otacake/ML · branch_name: refs/heads/master
visit_date: 2023-01-06T18:28:20.751254 · revision_date: 2020-11-07T16:22:52 · committer_date: 2020-11-07T16:22:52
gha_* fields (gha_license_id through gha_disabled): all null

content (Japanese comments translated to English):
```python
from sklearn import datasets
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap


def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=cl,
                    edgecolor='black')

    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        # c='' is rejected by recent matplotlib; facecolors='none' gives the
        # same hollow test-set markers the original intended
        plt.scatter(X_test[:, 0], X_test[:, 1], facecolors='none', edgecolor='black',
                    alpha=1.0, linewidths=1, marker="o", s=55, label="test set")


iris = datasets.load_iris()
# iris.data holds the features
# iris.target holds the class-label vector, already encoded as integers
X = iris.data[:, [2, 3]]
y = iris.target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

# Standardize the features from here on
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()  # scaler object
sc.fit(X_train)  # computes the per-feature mean and standard deviation here
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# Only now is the preprocessing done, it seems (real projects take more work)

from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter_no_change=40, eta0=0.1, random_state=1, shuffle=True)
ppn.fit(X_train_std, y_train)

y_pred = ppn.predict(X_test_std)
n = len(y_test)
for i in range(n):
    print("predict:", str(y_pred[i]), "ans:", str(y_test[i]))

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105, 150))
plt.show()
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,626 · extension: py · filename: Iris_training_logistic.py
num_repo_files: 21 · num_lang_files: 20 · num_lines: 76 · avg_line_length: 30.723684 · max_line_length: 105 · alphanum_fraction: 0.628631 · alpha_fraction: 0.603734 · hex_fraction: 0
---
repo_name: hugo1840/car-classification-with-sklearn · path: /car_vanilla_evaluator.py
__id__: 3,616,362,489,504 · github_id: 118,736,870 · star_events_count: 0 · fork_events_count: 0
blob_id: 0040d9640981d6b16be19238303947ca650610c4 · directory_id: ac84c76cf2663548991962584d0b875c10881b75
content_id: 5f67a7b95a5829e91b1290a15311720c263297d8 · snapshot_id: 103e384201ab29b390d7234606b497eac3d1e946 · revision_id: a8681344ac5579c94a0fb0f164afc9e7a2df6bce
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/hugo1840/car-classification-with-sklearn · branch_name: refs/heads/master
visit_date: 2021-09-05T05:06:00.431515 · revision_date: 2018-01-24T08:40:41 · committer_date: 2018-01-24T08:40:41
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 12:56:10 2017

@author: Hugot
"""

import numpy as np

# Part 1 : load data
Xt = np.genfromtxt('car.data', delimiter=',',
                   dtype=['U15', 'U15', 'U15', 'U15', 'U15', 'U15', 'U15'],
                   names=('buying', 'maint', 'doors', 'persons',
                          'lug_boot', 'safety', 'class'))

#dict_buying = {'low':1, 'med':2, 'high':3, 'vhigh':4}
#dict_maint = {'low':1, 'med':2, 'high':3, 'vhigh':4}
#dict_doors = {'2':2, '3':3, '4':4, '5more':5}
#dict_persons = {'2':2, '4':4, 'more':5}
#dict_lug_boot = {'small':1, 'med':2, 'big':3}
#dict_safety = {'low':1, 'med':2, 'high':3}
#dict_class = {'unacc':0, 'acc':1, 'good':2, 'vgood':3}

# ordered encoding : (i - 0.5)/N
dict_buying = {'low':0.125, 'med':0.375, 'high':0.625, 'vhigh':0.875}
dict_maint = {'low':0.125, 'med':0.375, 'high':0.625, 'vhigh':0.875}
dict_doors = {'2':0.125, '3':0.375, '4':0.625, '5more':0.875}
dict_persons = {'2':0.167, '4':0.500, 'more':0.833}
dict_lug_boot = {'small':0.167, 'med':0.500, 'big':0.833}
dict_safety = {'low':0.167, 'med':0.500, 'high':0.833}
dict_class = {'unacc':0, 'acc':1, 'good':2, 'vgood':3}

#Xtt=Xt[0:3] # row 0,1,2
#Xtt_num = np.zeros((3,7), dtype = np.int)

# np.float was an alias for the builtin and is removed in NumPy 1.24+
data = np.zeros((Xt.shape[0], 7), dtype=float)
rr = 0
for row in Xt:
    # print(row)
    data[rr, :] = np.array([dict_buying[row[0]], dict_maint[row[1]],
                            dict_doors[row[2]], dict_persons[row[3]],
                            dict_lug_boot[row[4]], dict_safety[row[5]],
                            dict_class[row[6]]])
    rr = rr + 1

# Part 2: training set = 1210 & test set = 518
np.random.seed(10)
set_perm = np.random.permutation(range(data.shape[0]))
ind_train = set_perm[0:1210]
ind_test = set_perm[1210:]
X_train = data[ind_train, 0:6]
y_train = data[ind_train, 6]
X_test = data[ind_test, 0:6]
y_test = data[ind_test, 6]

# Part 3: standardization
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X_train)
#print('X scaler mean: ', scaler.mean_)
#print('X scaler variance: ', scaler.var_)
standardized_X = scaler.transform(X_train)

# Part 4: feature selection
from sklearn.ensemble import ExtraTreesClassifier
slct = ExtraTreesClassifier()
slct.fit(X_train, y_train)
# display the relative importance of each attribute
print('\n\nfeature_importances:')
print(slct.feature_importances_)

# Part 5: decision tree classifier
from sklearn import tree
#from sklearn.tree import DecisionTreeClassifier
# fit a ID3 model to the data : 'entropy', or CART model: 'gini'
# change max_depth & min_samples_leaf to simplify the tree
clf = tree.DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
                                  max_features=None, max_leaf_nodes=None,
                                  min_impurity_decrease=0.0, min_impurity_split=None,
                                  min_samples_leaf=1, min_samples_split=2,
                                  min_weight_fraction_leaf=0.0, presort=False,
                                  random_state=None, splitter='best')
#clf.fit(standardized_X, y_train)
clf.fit(X_train, y_train)
print('\n\nDecision_tree_classifier:')
print(clf)

from sklearn import metrics
std_X_test = scaler.transform(X_test)
expected_clf = y_test
#predicted_clf = clf.predict(std_X_test)
predicted_clf = clf.predict(X_test)
# summarize the fit of the model
print('\n\nDecision_tree_classifier_prediction_summary:')
print(metrics.classification_report(expected_clf, predicted_clf))
print(metrics.confusion_matrix(expected_clf, predicted_clf))

# visualization
# D:\Anaconda3\Library\bin\graphviz
import graphviz
dot_data = tree.export_graphviz(clf, out_file=None,
                                feature_names=['buying', 'maint', 'doors',
                                               'persons', 'lug_boot', 'safety'],
                                class_names=['unacc', 'acc', 'good', 'vgood'],
                                filled=True, rounded=True,
                                special_characters=True)
graph = graphviz.Source(dot_data)
graph.render("cars_tree")

# Part 6: Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
nby = GaussianNB()
nby.fit(X_train, y_train)
print('\n\nNaive_bayes_classifier:')
print(nby)
# make predictions
expected_nby = y_test
predicted_nby = nby.predict(X_test)
# summarize the fit of the model
print('\n\nNaive_bayes_classifier_prediction_summary:')
print(metrics.classification_report(expected_nby, predicted_nby))
print(metrics.confusion_matrix(expected_nby, predicted_nby))

# Part 7: k Nearest Neighbors classifier
from sklearn.neighbors import KNeighborsClassifier
# fit a k-nearest neighbor model to the data
knb = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
                           metric_params=None, n_jobs=1, n_neighbors=5, p=2,
                           weights='uniform')
knb.fit(X_train, y_train)
print('\n\nK_nearest_neighbors_classifier:')
print(knb)
# make predictions
expected_knb = y_test
predicted_knb = knb.predict(X_test)
# summarize the fit of the model
print('\n\nK_neighbors_classifier_prediction_summary:')
print(metrics.classification_report(expected_knb, predicted_knb))
print(metrics.confusion_matrix(expected_knb, predicted_knb))

# Part 8: support vector machine classifier
from sklearn.svm import SVC
# fit a SVM model to the data
svmc = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
           decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',
           max_iter=-1, probability=False, random_state=None, shrinking=True,
           tol=0.001, verbose=False)
svmc.fit(X_train, y_train)
print('\n\nSVM_classifier:')
print(svmc)
# make predictions
expected_svmc = y_test
predicted_svmc = svmc.predict(X_test)
# summarize the fit of the model
print('\n\nSVM_classifier_prediction_summary:')
print(metrics.classification_report(expected_svmc, predicted_svmc))
print(metrics.confusion_matrix(expected_svmc, predicted_svmc))

# Part 9: Multilayer Perceptron classifier
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1)
mlp.fit(X_train, y_train)
print('\n\nMLP_classifier:')
print(mlp)
# make predictions
expected_mlp = y_test
predicted_mlp = mlp.predict(X_test)
# summarize the fit of the model
print('\n\nMLP_classifier_prediction_summary:')
print(metrics.classification_report(expected_mlp, predicted_mlp))
print(metrics.confusion_matrix(expected_mlp, predicted_mlp))
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 6,325 · extension: py · filename: car_vanilla_evaluator.py
num_repo_files: 3 · num_lang_files: 2 · num_lines: 185 · avg_line_length: 33.194595 · max_line_length: 103 · alphanum_fraction: 0.673992 · alpha_fraction: 0.636364 · hex_fraction: 0
---
repo_name: dmanchon/storydraw_api · path: /api/views.py
__id__: 12,678,743,484,931 · github_id: null · star_events_count: 0 · fork_events_count: 0
blob_id: 0e2df0d050514a923dd5f6335ee358b6f9f1f2ef · directory_id: e3f45e0e3b2fb30155d5469aef15e1f557cf7172
content_id: c590017ab6152a0955e7241cf5cebe44cc9502ef · snapshot_id: 74183f5678274289bf6ec841fb313e4122a3b27f · revision_id: 063db0f44e990e9c64382c526298fb6ed01b75d4
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/dmanchon/storydraw_api · branch_name: refs/heads/master
visit_date: 2020-12-31T02:42:15.996453 · revision_date: 2016-04-12T18:22:00 · committer_date: 2016-04-12T18:22:00
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
from api.models import Game
from api.serializers import GameSerializer
from rest_framework import viewsets
from rest_framework.response import Response


class GameViewSet(viewsets.ViewSet):

    def list(self, request):
        """
        API endpoint that allows games to be viewed or edited.
        """
        queryset = Game.objects.all().order_by('-start_date')
        # The serializer must be instantiated with the queryset;
        # `serializer.data` fails on the bare class.
        serializer = GameSerializer(queryset, many=True)
        return Response(serializer.data)

    def retrieve(self, request, key=None):
        return True

    def create(self, request):
        return True
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 562 · extension: py · filename: views.py
num_repo_files: 7 · num_lang_files: 7 · num_lines: 20 · avg_line_length: 27.1 · max_line_length: 62 · alphanum_fraction: 0.681495 · alpha_fraction: 0.681495 · hex_fraction: 0
---
repo_name: triumphpc/profileApp · path: /profileDjango/profileApp/admin.py
__id__: 7,816,840,488,828 · github_id: 102,396,801 · star_events_count: 0 · fork_events_count: 0
blob_id: f8a7a604f7e1c183e7913c79cc23ff156f500280 · directory_id: 8273a4db2e955c1300ce46f39a9de6a4816a90e8
content_id: 405f6ab228dc75753133de2a012cbdd8b1dfd66e · snapshot_id: 859e1f761164476b0f1a31f78ac1df88bc300374 · revision_id: 65da33d1c93aa81496cf215b91f641c7ded5e4d5
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/triumphpc/profileApp · branch_name: refs/heads/master
visit_date: 2019-03-12T21:40:45.184859 · revision_date: 2017-09-11T18:39:13 · committer_date: 2017-09-11T18:39:13
gha_* fields (gha_license_id through gha_disabled): all null

content (imports consolidated at the top; behavior unchanged):
```python
from django.contrib import admin

from profileApp.models import (
    City, Country, Education, Militery, Occupation,
    ProfileVk, Schools, Universities,
)


@admin.register(ProfileVk)
class ProfileAppAdmin(admin.ModelAdmin):
    list_display = ('phoneNumber', 'firstName', 'secondName', 'isOnline')
    list_filter = ['phoneNumber']


@admin.register(City)
class ProfileAppCityAdmin(admin.ModelAdmin):
    list_display = ['title']


@admin.register(Education)
class ProfileAppEducationAdmin(admin.ModelAdmin):
    list_display = ['universityName']


@admin.register(Occupation)
class ProfileAppOccupationAdmin(admin.ModelAdmin):
    list_display = ['type', 'name']


@admin.register(Militery)
class ProfileAppMiliteryAdmin(admin.ModelAdmin):
    list_display = ['unit']


@admin.register(Schools)
class ProfileAppSchoolAdmin(admin.ModelAdmin):
    pass


@admin.register(Universities)
class ProfileAppUniversityAdmin(admin.ModelAdmin):
    pass


@admin.register(Country)
class ProfileAppCountryAdmin(admin.ModelAdmin):
    pass
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,214 · extension: py · filename: admin.py
num_repo_files: 8 · num_lang_files: 7 · num_lines: 44 · avg_line_length: 26.613636 · max_line_length: 73 · alphanum_fraction: 0.793245 · alpha_fraction: 0.793245 · hex_fraction: 0
---
repo_name: treetrnk/flask_writer · path: /migrations/versions/44077c9504c9_definitions.py
__id__: 10,694,468,601,258 · github_id: 181,212,814 · star_events_count: 5 · fork_events_count: 3
blob_id: 2848b410fdae298a1df80f01252fbde8e61ef2c5 · directory_id: f9abe32ae9a9dd25a01187dde74d98214ca32de4
content_id: c44303c1c4c9b1dc6f12df85cda1a90a9a25c6ac · snapshot_id: 79bdab2f952d2667d92824b5abab0876b69ff97d · revision_id: c7b7443b5afc065626850bbde21a38aa30570d1e
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/treetrnk/flask_writer · branch_name: refs/heads/master
visit_date: 2023-09-03T11:41:41.412642 · revision_date: 2023-08-31T18:54:55 · committer_date: 2023-08-31T18:54:55
gha_license_id: null · gha_fork: false · gha_event_created_at: 2023-02-16T04:40:38 · gha_created_at: 2019-04-13T18:28:35 · gha_updated_at: 2023-01-09T16:17:26 · gha_pushed_at: 2023-02-16T04:40:36
gha_size: 17,190 · gha_stargazers_count: 5 · gha_forks_count: 3 · gha_open_issues_count: 3 · gha_language: Python · gha_archived: false · gha_disabled: false

content:
```python
"""definitions

Revision ID: 44077c9504c9
Revises: 67240b207c34
Create Date: 2019-07-28 05:24:34.831541

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '44077c9504c9'
down_revision = '67240b207c34'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('definition',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=150), nullable=False),
        sa.Column('body', sa.String(length=5000), nullable=False),
        sa.Column('hidden_body', sa.String(length=5000), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('tags_defs',
        sa.Column('tag_id', sa.Integer(), nullable=False),
        sa.Column('definition_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['definition_id'], ['definition.id'], ),
        sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),
        sa.PrimaryKeyConstraint('tag_id', 'definition_id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('tags_defs')
    op.drop_table('definition')
    # ### end Alembic commands ###
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,251 · extension: py · filename: 44077c9504c9_definitions.py
num_repo_files: 110 · num_lang_files: 48 · num_lines: 42 · avg_line_length: 28.785714 · max_line_length: 69 · alphanum_fraction: 0.668265 · alpha_fraction: 0.611511 · hex_fraction: 0
---
repo_name: bganglia/pigar · path: /pigar/pypi.py
__id__: 17,051,020,166,939 · github_id: 230,955,096 · star_events_count: 0 · fork_events_count: 0
blob_id: 1ea3515bc04f9608bc1becc9ccb151add0c31d80 · directory_id: a2c7a9a42fc2904131b4fb96ac1b12556263878e
content_id: 8641255fcbc56e83ceaff846dc6ad16a627c5493 · snapshot_id: 6f2c98cc8902b79f83546d3947352e0eac59e734 · revision_id: 3a656a4b8ba1c2050b33213f1c002dfb9ebc9a24
detected_licenses: ["BSD-3-Clause"] · license_type: permissive · repo_url: https://github.com/bganglia/pigar · branch_name: refs/heads/master
visit_date: 2020-12-02T09:00:42.143289 · revision_date: 2020-01-02T05:53:57 · committer_date: 2020-01-02T05:53:57
gha_license_id: NOASSERTION · gha_fork: true · gha_event_created_at: 2019-12-30T17:34:41 · gha_created_at: 2019-12-30T17:34:40 · gha_updated_at: 2019-12-27T23:27:48 · gha_pushed_at: 2019-08-28T04:00:34
gha_size: 14,695 · gha_stargazers_count: 0 · gha_forks_count: 0 · gha_open_issues_count: 0 · gha_language: null · gha_archived: false · gha_disabled: false

content:
```python
# -*- coding: utf-8 -*-

from __future__ import print_function, division, absolute_import

import gzip
import json
import collections
import io
import threading
import multiprocessing
from multiprocessing import Queue as ProcessSharableQueue
try:  # py2
    from HTMLParser import HTMLParser
    from urlparse import urljoin
    from Queue import Empty, Queue as ThreadSharableQueue
except ImportError:  # py3
    from html.parser import HTMLParser
    from urllib.parse import urljoin
    from queue import Empty, Queue as ThreadSharableQueue
import concurrent.futures

import requests

from .db import database, Database
from .unpack import top_level
from .log import logger
from .utils import Color, compare_version, cmp_to_key, binary_type

PYPI_URL = 'https://pypi.org'
PKG_URL = urljoin(PYPI_URL, '/pypi/{0}')
PKGS_URL = urljoin(PYPI_URL, '/simple/')
PKG_INFO_URL = urljoin(PYPI_URL, '/pypi/{0}/json')
ACCEPTABLE_EXT = ('.whl', '.egg', '.tar.gz', '.tar.bz2', '.zip')


def search_names(names, installed_pkgs):
    """Search package information by names(`import XXX`).
    """
    dler = Downloader()
    results = collections.defaultdict(list)
    not_found = list()
    for name in names:
        logger.info('Searching package name for "{0}" ...'.format(name))
        # If exists in local environment, do not check on the PyPI.
        if name in installed_pkgs:
            results[name].append(list(installed_pkgs[name]) + ['local'])
        # Check information on the PyPI.
        else:
            rows = None
            with database() as db:
                rows = db.query_all(name)
            if rows:
                for row in rows:
                    version = dler.download_package(row.package).version()
                    results[name].append((row.package, version, 'PyPI'))
            else:
                not_found.append(name)
    return results, not_found


def check_latest_version(package):
    """Check package latest version in PyPI."""
    return Downloader().download_package(package).version()


def update_db():
    """Update database."""
    print(Color.BLUE('Starting update database ...'))
    print(Color.YELLOW('The process will take a long time!!!'))
    logger.info('Crawling "{0}" ...'.format(PKGS_URL))
    try:
        updater = Updater()
    except Exception:
        logger.error("Fail to fetch all packages: ", exc_info=True)
        print(Color.RED('Operation aborted'))
        return

    try:
        updater.run()
        updater.wait()
    except (KeyboardInterrupt, SystemExit):
        # FIXME(damnever): the fucking signal..
        updater.cancel()
        print(Color.BLUE('Operation canceled!'))
    else:
        print(Color.BLUE('Operation done!'))


class Updater(object):

    _CPU_NUM = multiprocessing.cpu_count()  # C*2+1? The bandwidth matters..

    def __init__(self, proc_num=_CPU_NUM):
        self._proc_num = proc_num
        downloader = Downloader()
        index = downloader.download_index()
        downloader.close()

        threads_total = self._proc_num * 6
        if threads_total < 24:
            threads_total = 24
        self._threads_per_proc = threads_total // self._proc_num

        # XXX(damnever): using the magic __new__???
        self._thread_updater = None
        if proc_num == 1:
            pkg_names = ThreadSharableQueue()
            _extract_pkg_names(index, pkg_names.put)
            self._thread_updater = ThreadPoolUpdater(
                pkg_names, threads_total)
        else:
            self._pkg_names = ProcessSharableQueue()
            t = threading.Thread(
                target=_extract_pkg_names,
                args=(index, self._pkg_names.put)
            )
            t.daemon = True
            t.start()
            self._feed_thread = t
            self._procs = []
        with database():
            pass

    def run(self):
        if self._thread_updater is not None:
            self._thread_updater.run()
            return
        for _ in range(self._proc_num):
            proc = multiprocessing.Process(
                target=self._proc_main,
                args=(self._pkg_names, self._threads_per_proc),
            )
            proc.daemon = True
            proc.start()
            self._procs.append(proc)

    def wait(self):
        if self._thread_updater is not None:
            return self._thread_updater.wait()
        self._feed_thread.join()
        [proc.join() for proc in self._procs]

    def cancel(self):
        if self._thread_updater is not None:
            return self._thread_updater.cancel()
        [proc.terminate() for proc in self._procs]
        [proc.join(timeout=1) for proc in self._procs]

    def _proc_main(self, pkg_names, workernum):
        tupdater = ThreadPoolUpdater(pkg_names, workernum)
        tupdater.run()
        tupdater.wait()


class ThreadPoolUpdater(object):

    def __init__(self, pkg_names, workernum=24):
        self._max_workers = workernum
        self._pkg_names = pkg_names
        self._futures = []

    def run(self):
        with concurrent.futures.ThreadPoolExecutor(
                max_workers=self._max_workers) as executor:
            # Incase of unexpected error happens.
            for _ in range(self._max_workers*3):
                future = executor.submit(self.extract_and_update)
                self._futures.append(future)

    def wait(self):
        for future in concurrent.futures.as_completed(self._futures):
            try:
                error = future.exception()
            except concurrent.futures.CancelledError:
                break
            if error is not None:
                logger.error('Unexpected error: {}'.format(error))

    def cancel(self):
        for future in self._futures:
            future.cancel()

    def extract_and_update(self):
        dler = Downloader()
        db = Database()
        try:
            while 1:
                try:
                    pkg_name = self._pkg_names.get(block=False)
                    logger.info('Processing package: %s', pkg_name)
                    pkg = dler.download_package(pkg_name)
                    top_levels = pkg.top_levels()
                    db.insert_package_with_imports(pkg_name, top_levels)
                except (requests.RequestException, KeyError) as e:
                    logger.error(
                        ('Maybe package "%s" is no longer available'
                         ' or it is non-standard: %r'), pkg_name, e)
        except Empty:
            pass
        except Exception:
            logger.debug('Thread exited:', exc_info=True)
            raise
        finally:
            dler.close()
            db.close()


class Downloader(object):

    _HEADERS = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip',
        'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
        'User-Agent': ('Mozilla/5.0 (X11; Linux x86_64; rv:13.0) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/44.0.2403.157 Safari/537.36'),
    }

    def __init__(self):
        self._session = requests.Session()

    def download(self, url, try_decode=True):
        # XXX(damnever): timeout?
        resp = self._session.get(url, headers=self._HEADERS)
        resp.raise_for_status()
        data = resp.content
        if 'gzip' == resp.headers.get('Content-Encoding'):
            try:
                with gzip.GzipFile(fileobj=io.BytesIO(data)) as gz:
                    data = gz.read()
            except (OSError, IOError):  # Not a gzip file
                pass
        if try_decode and isinstance(data, binary_type):
            data = data.decode('utf-8')
        return data

    def download_index(self):
        return self.download(PKGS_URL)

    def download_package(self, name):
        pkg_info = self.download(PKG_INFO_URL.format(name))
        return Package(name, json.loads(pkg_info), self)

    def close(self):
        self._session.close()


class Package(object):

    def __init__(self, name, pkg_info, downloader):
        self._name = name
        self._pkg_info = pkg_info
        self._downloader = downloader

    def version(self):
        info = self._pkg_info
        try:
            latest = info['info'].get('version', None)
            if latest is not None:
                return latest
            latest = sorted(info['releases'], key=cmp_to_key(compare_version))
            latest = latest[-1]
            return latest
        except KeyError:
            return 'unknown'

    def top_levels(self):
        # Extracting names which can be imported.
        url = None
        for item in self._pkg_info['urls']:
            if item['filename'].endswith(ACCEPTABLE_EXT):
                url = item['url']
                break
        if url is None:
            return []
        pkg = self._downloader.download(url, try_decode=False)
        try:
            return top_level(url, pkg)
        except Exception:
            return []


def _extract_pkg_names(html, put):
    """Extract data from html."""
    class PackageNameParser(HTMLParser):
        def handle_starttag(self, tag, attrs):
            if tag == 'a':
                attrs = dict(attrs)
                if attrs.get('href', None):
                    name = attrs['href'].strip('/').split('/')[-1]
                    put(name)
    PackageNameParser().feed(html)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 9,396 · extension: py · filename: pypi.py
num_repo_files: 2 · num_lang_files: 1 · num_lines: 296 · avg_line_length: 30.743243 · max_line_length: 78 · alphanum_fraction: 0.561196 · alpha_fraction: 0.554704 · hex_fraction: 0
---
repo_name: Rahulrajsr2714/simple-eccomerce-django-project · path: /customer/urls.py
__id__: 14,302,241,104,838 · github_id: 294,997,041 · star_events_count: 0 · fork_events_count: 0
blob_id: 6eb3393b9865346a8135531b0b5c247b2b85a272 · directory_id: 5a90a0c641ae4bc215a9bf0769c5fac0eacb8426
content_id: f856e70e73bacd1f2c775067e52aac9cc104b2bf · snapshot_id: 8a1f3d6554815d5e90c381ff2c70d351791eee09 · revision_id: 1785f2e7373f337fec188902e9fd54bdc798901e
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/Rahulrajsr2714/simple-eccomerce-django-project · branch_name: refs/heads/master
visit_date: 2022-12-09T21:53:15.072169 · revision_date: 2020-09-12T18:29:56 · committer_date: 2020-09-12T18:29:56
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
from django.urls import path
from . import views
from django.views.generic import RedirectView

urlpatterns = [
    path('', RedirectView.as_view(url='products')),
    path('registercustomer', views.registercustomer, name='registercustomer'),
    path('logout', views.logoutcustomer, name='logoutcustomer'),
    path('logincustomer', views.logincustomer, name='logincustomer'),
    path('products', views.homepage, name='products'),
    path('addtocart', views.addproducttocart, name='addtocart'),
    path('removefromcart', views.removeproductfromcart, name='removefromcart'),
    path('viewcustomercart', views.viewcustomercart, name='viewcustomercart'),
    path('removefromcartpage/<int:cart_item_id>', views.removeproductcartpage, name='removeproductcartpage'),
    path('checkoutcustomer', views.checkoutcustomer, name='checkoutcustomer'),
    path('markpaymentsuccess', views.markpaymentsuccess, name='markpaymentsuccess'),
    path('viewproduct/<int:product_id>', views.viewproductdetails, name='viewproductdetails'),
]
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,047 · extension: py · filename: urls.py
num_repo_files: 9 · num_lang_files: 3 · num_lines: 21 · avg_line_length: 48.857143 · max_line_length: 84 · alphanum_fraction: 0.741165 · alpha_fraction: 0.741165 · hex_fraction: 0
---
repo_name: IvanovVitalii/project_ssw · path: /project_ssw/alias/models.py
__id__: 15,152,644,666,626 · github_id: 381,144,656 · star_events_count: 0 · fork_events_count: 0
blob_id: f3064cc2f05339b9ea8b3316f927f54980d8f616 · directory_id: d0356e1bfb7c8bcbc8afdb80aa55c5ee66af9b4b
content_id: eeea15630591e09f47fdaf8f200d963ca921a611 · snapshot_id: 7efd60f84090610170627d1912d0fea67ed97e5f · revision_id: de69a5a8bca862581cd5b37db9e1154052dc3a44
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/IvanovVitalii/project_ssw · branch_name: refs/heads/main
visit_date: 2023-06-30T19:10:33.838705 · revision_date: 2021-07-30T07:07:33 · committer_date: 2021-07-30T07:07:33
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
from django.db import models


class Alias(models.Model):
    alias = models.CharField(max_length=24)
    target = models.CharField(max_length=24)
    start = models.DateTimeField()
    end = models.DateTimeField(default=None, null=True)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 238 · extension: py · filename: models.py
num_repo_files: 10 · num_lang_files: 8 · num_lines: 8 · avg_line_length: 28.75 · max_line_length: 55 · alphanum_fraction: 0.726891 · alpha_fraction: 0.710084 · hex_fraction: 0
---
repo_name: cjacques01/portfolio · path: /blog/views.py
__id__: 1,005,022,371,983 · github_id: 242,603,810 · star_events_count: 0 · fork_events_count: 0
blob_id: 909b80353e604352da8940f9c79c0100197a2cfd · directory_id: 8cd560b3ee8bea9da35b46b8089c65ea6bf34886
content_id: 7b1c054eb356a3542bde4c7576d2219b3daf55fd · snapshot_id: d9d88289c62774ebecae3e0cd96df789ced1e2ee · revision_id: 1b605820dcb57514489d799b44bdf685e41be745
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/cjacques01/portfolio · branch_name: refs/heads/master
visit_date: 2022-12-13T13:16:59.107759 · revision_date: 2020-02-24T04:00:53 · committer_date: 2020-02-24T04:00:53
gha_license_id: null · gha_fork: false · gha_event_created_at: 2022-12-08T03:40:32 · gha_created_at: 2020-02-23T22:49:55 · gha_updated_at: 2020-02-24T04:01:14 · gha_pushed_at: 2022-12-08T03:40:32
gha_size: 12,069 · gha_stargazers_count: 0 · gha_forks_count: 0 · gha_open_issues_count: 2 · gha_language: HTML · gha_archived: false · gha_disabled: false

content:
```python
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.http import Http404
from . import models
from django.contrib import messages

# Create your views here.


class BlogList(generic.ListView):
    paginate_by = 5
    model = models.Blog


def detail(request, blog_id):
    detailblog = get_object_or_404(models.Blog, pk=blog_id)
    return render(request, 'blog/blog_detail.html', {'blog': detailblog})
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 447 · extension: py · filename: views.py
num_repo_files: 12 · num_lang_files: 4 · num_lines: 17 · avg_line_length: 25.294118 · max_line_length: 72 · alphanum_fraction: 0.749441 · alpha_fraction: 0.727069 · hex_fraction: 0
---
repo_name: gungui98/INT3414---Reinforcement-Learning · path: /Tic-Tac-Toe/testAI.py
__id__: 7,284,264,574,929 · github_id: 118,138,998 · star_events_count: 0 · fork_events_count: 2
blob_id: 3c60c7e2a863700fd3ecc20d38b6870796b7b008 · directory_id: 5c8f862e07409b06c00eef48d5ef25b8e86828c8
content_id: 166a3e82947912e46e0916acff4955b0b7481040 · snapshot_id: fba4a87c6bbff47b3bc74188f187252ad52ddca4 · revision_id: 38bce513bdac861c9f864ebeff8387e8fde2c96d
detected_licenses: ["MIT"] · license_type: permissive · repo_url: https://github.com/gungui98/INT3414---Reinforcement-Learning · branch_name: refs/heads/master
visit_date: 2021-05-10T18:56:52.969638 · revision_date: 2018-02-06T04:51:18 · committer_date: 2018-02-06T04:51:18
gha_license_id: MIT · gha_fork: false · gha_event_created_at: 2018-02-05T15:29:06 · gha_created_at: 2018-01-19T15:01:03 · gha_updated_at: 2018-01-21T13:03:46 · gha_pushed_at: 2018-02-05T15:29:05
gha_size: 745 · gha_stargazers_count: 0 · gha_forks_count: 2 · gha_open_issues_count: 0 · gha_language: Python · gha_archived: false · gha_disabled: null

content:
```python
from env4 import TicTacToeEnv as env
from AI import AI
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def csv_to_arr(file):
    df = pd.read_csv(file, sep=',', header=None)
    return (df.values)


env = env()
player1 = AI(env, 1)
player2 = AI(env, 2)
player1.q = csv_to_arr('data2.csv')
player2.q = csv_to_arr('data1.csv')
done = False
win = 0
loss = 0
draw = 0
list1 = []
list2 = []
for i in range(10000):
    env.reset()
    player1.reset()
    player2.reset()
    done = False
    reward = 0
    while (done == False):
        reward, done = player1.nextState()
        if (reward == 1):
            win = win + 1
        if (reward == -1):
            loss = loss + 1
        if ((reward == 0) & (done == True)):
            draw = draw + 1
        list1 = list1 + [win - loss]
        list2 = list2 + [i]
        # env.show_board()
        if (done):
            break
        reward, done = player2.nextState()
        if ((reward == 0) & (done == True)):
            draw = draw + 1
print('win=', win)
print('loss=', loss)
print('draw=', draw)
plt.plot(list2, list1)
plt.show()
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 939 · extension: py · filename: testAI.py
num_repo_files: 19 · num_lang_files: 14 · num_lines: 46 · avg_line_length: 18.456522 · max_line_length: 41 · alphanum_fraction: 0.640043 · alpha_fraction: 0.599574 · hex_fraction: 0
---
repo_name: nyucel/kriptografi · path: /final/140401016/final.py
__id__: 9,182,640,119,544 · github_id: 255,692,683 · star_events_count: 9 · fork_events_count: 54
blob_id: a9ec064bcb55b36178280f531ec3b54564b97f5a · directory_id: 512d928c138008406733d48bdedc00bf006ed984
content_id: e739ece3cabe1bfddc16309cceccfbcedf95a3e4 · snapshot_id: 4bb73ada2088777cd3df5656c6695daf045d9fc9 · revision_id: 10325e745da5cc77bc9a80d94600fd9ff7c21ee5
detected_licenses: ["Unlicense"] · license_type: permissive · repo_url: https://github.com/nyucel/kriptografi · branch_name: refs/heads/master
visit_date: 2022-11-12T10:56:03.525223 · revision_date: 2020-06-26T09:13:02 · committer_date: 2020-06-26T09:13:02
gha_license_id: Unlicense · gha_fork: false · gha_event_created_at: 2020-06-26T09:13:03 · gha_created_at: 2020-04-14T18:30:27 · gha_updated_at: 2020-06-26T09:12:47 · gha_pushed_at: 2020-06-26T09:13:03
gha_size: 217 · gha_stargazers_count: 6 · gha_forks_count: 30 · gha_open_issues_count: 0 · gha_language: Python · gha_archived: false · gha_disabled: false

content (Turkish user-facing strings kept verbatim, with translation glosses in comments):
```python
import random
import time


def generate_bit(n):
    p = random.getrandbits(n)
    bin = format(p, 'b').zfill(n)
    return bin


def and_bit(block1, block2):
    res = ""
    if len(block1) == len(block2):
        for i in range(len(block1)):
            res = res + str(int(block1[i]) & int(block2[i]))
        return (res)
    else:
        print("AND Operation Failure!")


def or_bit(block1, block2):
    res = ""
    if len(block1) == len(block2):
        for i in range(len(block1)):
            res = res + str(int(block1[i]) | int(block2[i]))
        return (res)
    else:
        print("OR Operation Failure!")


def xor_bit(block1, block2):
    res = ""
    if len(block1) == len(block2):
        for i in range(len(block1)):
            res = res + str(int(block1[i]) ^ int(block2[i]))
        return (res)
    else:
        print("XOR Operation Failure!")


def not_bit(block):
    mask = "11111111"
    res = ""
    if len(block) == len(mask):
        for i in range(len(block)):
            res = res + str(int(block[i]) ^ int(mask[i]))
        return (res)
    else:
        print("NOT Operation Failure!")


def add_bit(block1, block2):
    res = ""
    add = 0
    for i in range(len(block1) - 1, -1, -1):
        flag = int(block1[i]) + int(block2[i]) + add
        if flag >= 2:
            add = 1
            res = res + str(flag % 2)
        else:
            add = 0
            res = res + str(flag)
    return ''.join(reversed(res))


def right_shift_bit(k, block):
    k = int(k % (len(block) / 2))
    block_list = []
    for i in range(len(block)):
        block_list.append(block[i])
    for i in range(k):
        for j in range(len(block) - 1, 0, -1):
            block_list[j] = block_list[j - 1]
        block_list[0] = 0
    result_block = ""
    for e in block_list:
        result_block = result_block + str(e)
    return result_block


def select_next_function(i, block1, block2, block3):
    if i % 4 == 0:
        return or_bit(and_bit(block1, block2), and_bit(block1, not_bit(block3)))
    elif i % 4 == 1:
        return xor_bit(xor_bit(block1, block2), block3)
    elif i % 4 == 2:
        return or_bit(and_bit(not_bit(block1), block2), and_bit(block3, block2))
    else:
        return or_bit(or_bit(and_bit(block1, block2), and_bit(block1, block3)), and_bit(block2, block3))


def k_value(i):
    K = ["10010011", "11000010", "11001100", "01000100",
         "01011010", "00101001", "01101001", "10001110"]
    return K[i % 8]


def check_first_eight(unCheck):
    if unCheck[0:8] == "00000000":
        return True
    return False


def hash(before_hash):
    block1 = before_hash[0:8]
    block2 = before_hash[8:16]
    block3 = before_hash[16:24]
    block4 = before_hash[24:32]
    W = [block1, block2, block3, block4]
    for i in range(32):
        f_result = select_next_function(i, block1, block2, block3)
        shifted_block1 = right_shift_bit(i + 1, block1)
        w = W[i % 4]
        k = k_value(i)
        operated = add_bit(k, add_bit(w, add_bit(shifted_block1, add_bit(block4, f_result))))
        block4 = block3
        block3 = right_shift_bit(i + 1, block2)
        block2 = block1
        block1 = operated
    hashed = block1 + block2 + block3 + block4
    return hashed


def final(filepath):
    f = open(filepath, "r")
    before_hash = f.read()
    f.close()
    return hash(before_hash)


def start_block_chain():
    f = open("HASHSUM.txt", "w")
    f.write("NO RASTGELE-SAYI ONCEKI-BLOK-OZETI")  # "NO RANDOM-NUMBER PREVIOUS-BLOCK-DIGEST"
    f.close()
    start_time = time.time()
    for i in range(99):
        if (time.time() - start_time > 600):
            print("10DK süre sınırı aşıldı.")  # "10-minute time limit exceeded."
            break
        filepath_read = "%.3d" % (i + 1)
        hashed = final("%s.txt" % (filepath_read))
        flag = False
        while flag is False:
            random = generate_bit(32)
            before_hash = add_bit(random, hashed)
            after_hash = hash(before_hash)
            flag = check_first_eight(after_hash)
        filepath_write = "%.3d" % (i + 2)
        f = open("%s.txt" % (filepath_write), "w")
        f.write(after_hash)
        f.close()
        f = open("HASHSUM.txt", "a")
        hashSum = "\n%.3d %s %s" % (i + 2, random, hashed)
        f.write(hashSum)
        f.close()


start_block_chain()
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 4,399 · extension: py · filename: final.py
num_repo_files: 64 · num_lang_files: 54 · num_lines: 147 · avg_line_length: 27.863946 · max_line_length: 99 · alphanum_fraction: 0.525273 · alpha_fraction: 0.477687 · hex_fraction: 0
---
repo_name: skiran252/quantizing_model_script · path: /transform.py
__id__: 10,385,230,934,508 · github_id: 369,531,148 · star_events_count: 0 · fork_events_count: 0
blob_id: 971e28e32686bb5621fefb2d3402b48488200e79 · directory_id: 119125857c9c594d51a14173a1c24e47a311085c
content_id: 7bd6f8a072d58f181465da84d54befccaa9d71ef · snapshot_id: 4a9d96aa76921b9404f2b5568c64405121862976 · revision_id: 45447101c1adf47bc8f0189df1598cfd1be5073c
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/skiran252/quantizing_model_script · branch_name: refs/heads/main
visit_date: 2023-04-23T12:29:51.634669 · revision_date: 2021-05-21T12:38:33 · committer_date: 2021-05-21T12:38:33
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
import torch
import numpy as np
from simpletransformers.model import TransformerModel
from transformers import RobertaForSequenceClassification, RobertaTokenizer
import onnx
import onnxruntime

model = TransformerModel(
    "roberta", "Roberta_evaluator_6", args=({"fp16": False}), num_labels=2, use_cuda=False
)
tokenizer = RobertaTokenizer.from_pretrained("Roberta_evaluator_6")

question = "who is the father of the nation?"
answer = "Mahatma Gandhi"

input_data = tokenizer(
    text=question,
    text_pair=answer,
    padding="max_length",
    max_length=512,
    truncation=True,
    return_tensors="pt",
)

torch.onnx.export(
    model.model,
    (input_data["input_ids"], input_data["attention_mask"]),
    "roberta.onnx",
    opset_version=11,
    input_names=["input_ids", "attention_mask"],
    output_names=["output"],
    dynamic_axes={
        "input": {0: "batch_size", 1: "sentence_length"},
        "output": {0: "batch_size"},
    },
)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 955 · extension: py · filename: transform.py
num_repo_files: 4 · num_lang_files: 3 · num_lines: 38 · avg_line_length: 24.157895 · max_line_length: 89 · alphanum_fraction: 0.685864 · alpha_fraction: 0.672251 · hex_fraction: 0
---
repo_name: Jimyfar/bbs · path: /routes/setting.py
__id__: 11,458,972,769,699 · github_id: 195,542,261 · star_events_count: 0 · fork_events_count: 0
blob_id: 2f22b4651b83ddfbfbecdc7b029c2109d39810ee · directory_id: 71ba8b6ec24e905d9f70d92f95ef4295df9944fb
content_id: 9e136719c5a74e41cfc5bfe05e3e5762bce385e3 · snapshot_id: 688c5b6bc6461ffbc4f04a383806bf45860a2ca3 · revision_id: 1389107c29dfb53bdbfeabd7338de3f74685deea
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/Jimyfar/bbs · branch_name: refs/heads/master
visit_date: 2020-06-16T10:35:36.352525 · revision_date: 2019-07-08T13:44:21 · committer_date: 2019-07-08T13:44:21
gha_* fields (gha_license_id through gha_disabled): all null

content (Chinese comments translated to English; Chinese user-facing strings kept verbatim, with translation glosses):
```python
import os
import uuid

from flask import (
    render_template,
    request,
    redirect,
    url_for,
    Blueprint,
)
from werkzeug.datastructures import FileStorage

import config
from models.user import User
from routes import current_user
from utils import log

main = Blueprint('setting', __name__)


@main.route('/')
def index():
    print('running setting route')
    u = current_user()
    if u is None:
        return redirect(url_for('.index'))
    else:
        return render_template(
            'setting.html',
            user=u,
            alert_error=None,
            alert_success=None,
        )


@main.route('/update_username_and_signature', methods=['POST'])
def update_username_and_signature():
    username = request.form.get('username', '')
    signature = request.form.get('signature', '')
    u = current_user()
    if signature == '':
        # Fall back to the default signature when none was given
        signature = config.default_signature
    if not User.validate_name(username):
        username = u.username
    User.update(u.id, username=username, signature=signature)
    return redirect(url_for(".index"))


@main.route('/update_password', methods=['POST'])
def update_password():
    old_password = request.form.get('old_password', '')
    new_password = request.form.get('new_password', '')
    u = current_user()
    form = dict(
        username=u.username,
        password=old_password,
    )
    # Reuse the login validation to check that the old password is correct
    if User.validate_login(form) is not None and User.validate_password(new_password):
        User.update(u.id, password=User.salted_password(new_password))
        return render_template(
            'setting.html',
            user=u,
            alert_error=None,
            alert_success='修改密码成功',  # "Password changed successfully"
        )
    else:
        return render_template(
            'setting.html',
            user=u,
            alert_error="修改密码失败",  # "Password change failed"
            alert_success=None,
        )


@main.route('/image/add', methods=['POST'])
def avatar_add():
    file: FileStorage = request.files['avatar']
    # file = request.files['avatar']
    # filename = file.filename
    # ../../root/.ssh/authorized_keys
    # images/../../root/.ssh/authorized_keys
    # filename = secure_filename(file.filename)
    suffix = file.filename.split('.')[-1]
    # filename = '{}.{}'.format(str(uuid.uuid4()), suffix)
    filename = str(uuid.uuid4())
    path = os.path.join('images', filename)
    file.save(path)
    u = current_user()
    User.update(u.id, image='/images/{}'.format(filename))
    return redirect(url_for('setting.index'))
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,636 · extension: py · filename: setting.py
num_repo_files: 17 · num_lang_files: 8 · num_lines: 97 · avg_line_length: 25.298969 · max_line_length: 86 · alphanum_fraction: 0.607759 · alpha_fraction: 0.606583 · hex_fraction: 0
---
repo_name: alirahatmayo/PlayFaction · path: /account/api/serializers.py
__id__: 893,353,248,243 · github_id: 169,639,252 · star_events_count: 2 · fork_events_count: 0
blob_id: 4331f57f17b638853022bb41090da46a92e382d3 · directory_id: 46f47e43a08cebde519b9efa97911b0fd8fdd241
content_id: e8eee8ef46c0284db7fa3d22ac37798ca445284d · snapshot_id: 7615c2f1cb95aa921ed843df93273cda81e0bd82 · revision_id: 433ceedef356339063306ce6faed4906b837b025
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/alirahatmayo/PlayFaction · branch_name: refs/heads/master
visit_date: 2020-04-21T14:34:03.945962 · revision_date: 2019-02-20T17:39:56 · committer_date: 2019-02-20T17:39:56
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
from rest_framework import serializers

from transaction.api.serializers import TransactionSerializer
from account.models import Account

'''
Serializer -> converts into JSON
Serializer -> does validations on data
'''


class AccountSerializer(serializers.ModelSerializer):
    transfer_from = TransactionSerializer(many=True)
    transfer_to = TransactionSerializer(many=True)

    class Meta:
        model = Account
        fields = [
            'id',
            'account_title',
            'balance',
            'remarks',
            'transfer_from',
            'transfer_to'
        ]
        read_only_fields = ['id']

    # def after_transaction_balance(self, amount, sender, receiver):
    #     sender_person = Account.objects.filter(user_id=sender)
    #     sender_balance =
    #     sender_balance
    #     receiver_balance = Account.objects.filter(user_id=receiver)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 887 · extension: py · filename: serializers.py
num_repo_files: 38 · num_lang_files: 38 · num_lines: 34 · avg_line_length: 25.058824 · max_line_length: 69 · alphanum_fraction: 0.639233 · alpha_fraction: 0.639233 · hex_fraction: 0
---
repo_name: ossuchas/aplinechatbot · path: /models/chatbot_mst_user.py
__id__: 11,106,785,461,141 · github_id: 217,653,051 · star_events_count: 0 · fork_events_count: 0
blob_id: c5601513565507ed26df7c757ff6853c1a6adac1 · directory_id: 4c408e04c630080f4a404ae7e37005b7881840cf
content_id: aec8d910771b2577433cec929d43c4b8fb446c27 · snapshot_id: 9849560493a31a65ca1ab8438e001aed1b604e01 · revision_id: a538aaaf6a029ddf5384defb8ab45fd8628a6115
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/ossuchas/aplinechatbot · branch_name: refs/heads/master
visit_date: 2021-12-04T11:53:20.964858 · revision_date: 2021-11-16T07:44:43 · committer_date: 2021-11-16T07:44:43
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
from db import db
from typing import List
from datetime import datetime


class MstUserModel(db.Model):
    __tablename__ = "chatbot_mst_user"

    user_id = db.Column(db.Integer, primary_key=True)
    user_token_Id = db.Column(db.String(255))
    user_name = db.Column(db.String(255))
    user_full_name = db.Column(db.String(255))
    user_type = db.Column(db.String(10))
    user_position = db.Column(db.String(100))
    user_sub_no = db.Column(db.String(10))
    user_emp_id = db.Column(db.String(50))
    user_empcode = db.Column(db.String(50))
    user_status = db.Column(db.String(2))
    createby = db.Column(db.String(20))
    createdate = db.Column(db.DateTime)
    modifyby = db.Column(db.String(20))
    modifydate = db.Column(db.DateTime)

    @classmethod
    def find_by_id(cls, _user_id: int) -> "MstUserModel":
        return cls.query.filter_by(user_id=_user_id).first()

    @classmethod
    def find_by_empcode(cls, _user_empcode: str) -> "MstUserModel":
        return cls.query.filter_by(user_empcode=_user_empcode).first()

    @classmethod
    def find_by_token_id(cls, _user_token_id: str) -> "MstUserModel":
        return cls.query.filter_by(user_token_Id=_user_token_id).first()

    @classmethod
    def check_auth_by_token_id(cls, _user_token_id: int) -> "MstUserModel":
        return cls.query.filter_by(user_token_Id=_user_token_id, user_status='A').first()

    @classmethod
    def check_VIP_auth_by_token_id(cls, _user_token_id: int) -> "MstUserModel":
        return cls.query.filter_by(user_token_Id=_user_token_id, user_status='A', user_type='VIP').first()

    @classmethod
    def check_VIP2_auth_by_token_id(cls, _user_token_id: int) -> "MstUserModel":
        return cls.query.filter_by(user_token_Id=_user_token_id, user_status='A', user_type='VIP2').first()

    @classmethod
    def check_clevel_auth_by_token_id(cls, _user_token_id: int) -> "MstUserModel":
        return cls.query.filter(cls.user_token_Id == _user_token_id,
                                cls.user_status == 'A',
                                cls.user_type.like('VIP%')).first()

    @classmethod
    def find_user_listrole(cls) -> List["MstUserModel"]:
        return cls.query.filter(cls.user_status == 'A',
                                cls.user_position.notin_(('C Level', 'MD', 'CEO', 'FI', 'Head Off'))).all()

    def save_to_db(self) -> None:
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self) -> None:
        db.session.delete(self)
        db.session.commit()
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,592 · extension: py · filename: chatbot_mst_user.py
num_repo_files: 81 · num_lang_files: 76 · num_lines: 65 · avg_line_length: 37.876923 · max_line_length: 107 · alphanum_fraction: 0.60841 · alpha_fraction: 0.597994 · hex_fraction: 0
---
repo_name: AVS18/beta23_techbuddies · path: /base/urls.py
__id__: 15,736,760,209,468 · github_id: 308,706,302 · star_events_count: 0 · fork_events_count: 0
blob_id: 4854550496080fb3f0220f18910780accdfb6e76 · directory_id: 073eaa4971acf86a43a29aa6461175129af89d17
content_id: edba34121e6346f9dcb342b81213cba68d227bcb · snapshot_id: 55db9ea9918999cbb6eda83e7d4817980d27a34f · revision_id: 53cfc14f37d31032c849dd87ef740bc4200b317f
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/AVS18/beta23_techbuddies · branch_name: refs/heads/master
visit_date: 2023-01-11T06:35:15.379907 · revision_date: 2020-11-19T10:20:53 · committer_date: 2020-11-19T10:20:53
gha_license_id: null · gha_fork: false · gha_event_created_at: 2020-11-02T19:55:21 · gha_created_at: 2020-10-30T17:58:10 · gha_updated_at: 2020-11-02T19:51:28 · gha_pushed_at: 2020-11-02T19:52:05
gha_size: 1,146 · gha_stargazers_count: 0 · gha_forks_count: 0 · gha_open_issues_count: 0 · gha_language: JavaScript · gha_archived: false · gha_disabled: false

content:
```python
from django.contrib import admin
from django.urls import path, include
from . import views

urlpatterns = [
    path('', views.home, name="home"),
    path('contact', views.contact, name="contact"),
    path('login', views.login, name="login"),
    path('register', views.register, name="register"),
    path('logout', views.logout, name="logout"),
    path('dashboard', views.dashboard, name="dashboard"),
    path('profile', views.profile, name="profile"),
    path('display', views.display, name="display"),
    path('plasma_contact/<str:other>', views.plasma_contact, name="plasma_contact"),
    path('displayContact', views.displayContact, name="displayContact"),
    path('statusUpdate/<str:id>/<str:status>', views.statusUpdate, name="statusUpdate"),
    path('filter', views.filter, name="filter")
]
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 783 · extension: py · filename: urls.py
num_repo_files: 20 · num_lang_files: 13 · num_lines: 17 · avg_line_length: 45.058824 · max_line_length: 86 · alphanum_fraction: 0.706258 · alpha_fraction: 0.706258 · hex_fraction: 0
---
repo_name: an10nimus/MyCodeForces · path: /Round_640_Div_4/G/G.py
__id__: 3,547,643,006,353 · github_id: 264,267,763 · star_events_count: 0 · fork_events_count: 0
blob_id: 66c80b2874f791c51bec2f8e9a8f34791dc3b451 · directory_id: 41d52e12d62e1935317e87e03639fdfdd5008419
content_id: f840e35236cda49d6b73864076a27c1282c5e28b · snapshot_id: f971718cbc393f812e7c45e4a1b6f558d8019c8b · revision_id: aef134222fe3218d28de308e7ef8a57d40b69179
detected_licenses: ["MIT"] · license_type: permissive · repo_url: https://github.com/an10nimus/MyCodeForces · branch_name: refs/heads/master
visit_date: 2022-11-13T07:35:10.907765 · revision_date: 2020-06-25T16:59:30 · committer_date: 2020-06-25T16:59:30
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
PATTERN = {
    0: [1, 4, 2, 5, 3],
    1: [1, 4, 2, 6, 3, 5],
    2: [1, 4, 2, 6, 3, 7, 5],
    3: [1, 5, 2, 6, 3, 7, 4, 8],
    4: [1, 4, 2, 5, 3, 7, 9, 6, 8]
}


def solve(n):
    if n < 4:
        ans = -1
    elif n == 4:
        ans = [2, 4, 1, 3]
    else:
        ans = []
        common = (n // 5) - 1
        mod = n % 5
        for jj in range(common):
            ans += [jj * 5 + k for k in PATTERN[0]]
        ans += [(common) * 5 + k for k in PATTERN[mod]]
    return ans


def output(arr):
    if isinstance(arr, list):
        print(' '.join(str(k) for k in arr))
    else:
        print(arr)


t = int(input())
for ___ in range(t):
    n = int(input())
    ans = solve(n)
    output(ans)
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 705 · extension: py · filename: G.py
num_repo_files: 32 · num_lang_files: 26 · num_lines: 33 · avg_line_length: 20.333333 · max_line_length: 53 · alphanum_fraction: 0.404255 · alpha_fraction: 0.329078 · hex_fraction: 0
---
repo_name: AlexGrasley/Web-Movie-Database · path: /backend/migrations/migrate.py
__id__: 19,610,820,709,237 · github_id: 211,689,620 · star_events_count: 0 · fork_events_count: 0
blob_id: 8935b63eba80addd95206b059c4be9ee92781b5a · directory_id: 0733eb7f9128194cdc41dfa7778a8ec4834425a9
content_id: 4e897416d38604a3437ce168ee0fab90c38be29a · snapshot_id: cdebe3372c05f7e4c23b5a60ee1fb98595b320d0 · revision_id: 1ec176298a20d4e1d54a32f0e24e4247b605a9ad
detected_licenses: [] · license_type: no_license · repo_url: https://github.com/AlexGrasley/Web-Movie-Database · branch_name: refs/heads/master
visit_date: 2022-03-18T02:44:54.599465 · revision_date: 2019-12-05T03:41:26 · committer_date: 2019-12-05T03:41:26
gha_* fields (gha_license_id through gha_disabled): all null

content:
```python
import mysql.connector as mariadb
import sys
import os
from enum import Enum
import time

up_end = ".up.sql"
down_end = ".down.sql"

# Order matters here
migrations = [
    "theaters",
    "rooms",
    "movies",
    "customers",
    "showings",
    "tickets",
]


def get_args():
    up_or_down = sys.argv[1]
    if up_or_down not in ["up", "down", "loop", "data"]:
        print("You must migrate up or down. (or `loop` for both)", file=sys.stderr)
        sys.exit(2)
    return up_or_down


def connect():
    mariadb_connection = mariadb.connect(
        user=os.environ['MOVIE_USER'],
        password=os.environ['MOVIE_PASS'],
        database=os.environ['MOVIE_DB'])
    return mariadb_connection


def load_queries(migrations, use_up: bool, multi=False):
    queries = []
    for name in migrations:
        name = f"./{name}{up_end if use_up else down_end}"
        file = open(name, "r")
        text = file.readlines()
        data = ""
        if multi:
            for line in text:
                queries.append(line.rstrip())
        else:
            for line in text:
                data += line
            queries.append(data)
    return queries


def up(conn):
    queries = load_queries(migrations, True)
    run_queries(conn, queries)


def down(conn):
    queries = load_queries(migrations, False)
    queries = queries[::-1]
    run_queries(conn, queries)


def dummy_data(conn):
    queries = load_queries(["data"], True, True)
    run_queries(conn, queries)


def run_queries(conn, queries):
    for query in queries:
        print(query)
        try:
            conn.execute(query)
        except Exception as e:
            print("Error executing query:", query, e, file=sys.stderr)


def main():
    up_or_down = get_args()
    connection = connect()
    cursor = connection.cursor(buffered=True)

    if up_or_down == "up":
        print("Going Up!")
        up(cursor)
    elif up_or_down == "down":
        print("Going Down!")
        down(cursor)
    elif up_or_down == "loop":
        print("You may now begin the push-up section.")
        print("Down")
        down(cursor)
        print("Up")
        up(cursor)
    elif up_or_down == "data":
        print("Going Dumb!")
        dummy_data(cursor)
    else:
        raise ValueError("Unreachable code reached.")


if __name__ == "__main__":
    main()
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,331 · extension: py · filename: migrate.py
num_repo_files: 39 · num_lang_files: 34 · num_lines: 105 · avg_line_length: 21.2 · max_line_length: 106 · alphanum_fraction: 0.573145 · alpha_fraction: 0.571858 · hex_fraction: 0
---
repo_name: oshadmon/foglamp-south-wind-sensors · path: /python/foglamp/plugins/south/wind_sensors/ina219.py
__id__: 2,680,059,626,990 · github_id: 161,227,106 · star_events_count: 0 · fork_events_count: 0
blob_id: b99b7a24bf512cf17f9f980552f2dd8505115793 · directory_id: 1a85fa9c101e45262df385695e97727ab0fbe01a
content_id: 52c4fe06f4ad433a1cb15106f0f1fae9be53d551 · snapshot_id: 2a69d95a2c4061012ba4009d2322068ebd82e800 · revision_id: eb89096facd1f4ae7fee12b998b4f7193efabdeb
detected_licenses: ["Apache-2.0"] · license_type: permissive · repo_url: https://github.com/oshadmon/foglamp-south-wind-sensors · branch_name: refs/heads/master
visit_date: 2020-04-10T19:12:46.689142 · revision_date: 2018-12-17T18:38:37 · committer_date: 2018-12-17T18:38:37
gha_license_id: null · gha_fork: false · gha_event_created_at: 2018-12-10T20:06:31 · gha_created_at: 2018-12-10T19:38:11 · gha_updated_at: 2018-12-10T20:04:39 · gha_pushed_at: 2018-12-10T20:06:30
gha_size: 0 · gha_stargazers_count: 0 · gha_forks_count: 0 · gha_open_issues_count: 0 · gha_language: Python · gha_archived: false · gha_disabled: null

content:
```python
# Based on - https://github.com/adafruit/Adafruit_CircuitPython_INA219/blob/master/adafruit_ina219.py
from pyftdi.i2c import I2cController, I2cNackError, I2cPort

try:
    import struct
except ImportError:
    import ustruct as struct

#pylint: disable=bad-whitespace
# Internal constants:
_INA219_DEFAULT_ADDRESS = 0x44

_REG_CONFIG = 0x00
_REG_SHUNTVOLTAGE = 0x01
_REG_BUSVOLTAGE = 0x02
_REG_POWER = 0x03
_REG_CURRENT = 0x04
_REG_CALIBRATION = 0x05

_CONFIG_BVOLTAGERANGE_32V = 0x2000
_CONFIG_SADCRES_12BIT_1S_532US = 0x0018
_CONFIG_GAIN_8_320MV = 0x1800
_CONFIG_BADCRES_12BIT = 0x0400
_CONFIG_MODE_SANDBVOLT_CONTINUOUS = 0x0007


""" === _to_signed function === """
def _to_signed(num: int = 0) -> int:
    if num > 0x7FFF:
        num -= 0x10000
    return num


class INA219:
    def __init__(self, i2c):
        self.slave = self.__i2c_slave_port(i2c)
        self.i2c_addr = _INA219_DEFAULT_ADDRESS
        # Multiplier in mA used to determine current from raw reading
        self._current_lsb = 0
        # Multiplier in W used to determine power from raw reading
        self._power_lsb = 0
        # Set chip to known config values to start
        self._cal_value = 4096
        # call set_calibration_32V_2A
        self.set_calibration_32V_2A()

    def __i2c_slave_port(self, i2c: I2cController = None) -> I2cPort:
        """ Get slave port
        :args:
            i2c:pyftdi.i2c.I2cController - I2C controller object
        :param:
            slave:pyftdi.i2c.I2cPort -
        :return:
            slave port object
        """
        try:
            slave = i2c.get_port(_INA219_DEFAULT_ADDRESS)
        except:
            print('Unable to get Port for %s' % _INA219_DEFAULT_ADDRESS)
            exit(1)
        return slave

    def _write_register(self, reg: int = 0x00, value: int = 0):
        """ Write to register
        :args:
            reg:int - register to write to
            value:int - value to write to register
        :param:
            seq:bytearray - bytearray of value
        """
        seq = bytearray([(value >> 8) & 0xFF, value & 0xFF])
        self.slave.write_to(reg, seq)

    def _read_register(self, reg: int = 0x00) -> int:
        """ Read from register
        :args:
            reg:int - register to read from
        :param:
            buf - raw result from read
        :return:
            result from read
        """
        buf = self.slave.read_from(reg, 3)
        return (buf[0] << 8) | (buf[1])

    def set_calibration_32V_2A(self):
        """ Configures to INA219 to be able to measure up to 32V and 2A of current.
        Counter overflow occurs at 3.2A.
        """
        # By default we use a pretty huge range for the input voltage,
        # which probably isn't the most appropriate choice for system
        # that don't use a lot of power. But all of the calculations
        # are shown below if you want to change the settings. You will
        # also need to change any relevant register settings, such as
        # setting the VBUS_MAX to 16V instead of 32V, etc.

        # VBUS_MAX = 32V  (Assumes 32V, can also be set to 16V)
        # VSHUNT_MAX = 0.32  (Assumes Gain 8, 320mV, can also be 0.16, 0.08, 0.04)
        # RSHUNT = 0.1  (Resistor value in ohms)

        # 1. Determine max possible current
        # MaxPossible_I = VSHUNT_MAX / RSHUNT
        # MaxPossible_I = 3.2A

        # 2. Determine max expected current
        # MaxExpected_I = 2.0A

        # 3. Calculate possible range of LSBs (Min = 15-bit, Max = 12-bit)
        # MinimumLSB = MaxExpected_I/32767
        # MinimumLSB = 0.000061  (61uA per bit)
        # MaximumLSB = MaxExpected_I/4096
        # MaximumLSB = 0,000488  (488uA per bit)

        # 4. Choose an LSB between the min and max values
        #    (Preferrably a roundish number close to MinLSB)
        # CurrentLSB = 0.0001 (100uA per bit)
        self._current_lsb = .1  # Current LSB = 100uA per bit

        # 5. Compute the calibration register
        # Cal = trunc (0.04096 / (Current_LSB * RSHUNT))
        # Cal = 4096 (0x1000)
        self._cal_value = 4096

        # 6. Calculate the power LSB
        # PowerLSB = 20 * CurrentLSB
        # PowerLSB = 0.002 (2mW per bit)
        self._power_lsb = .002  # Power LSB = 2mW per bit

        # 7. Compute the maximum current and shunt voltage values before overflow
        #
        # Max_Current = Current_LSB * 32767
        # Max_Current = 3.2767A before overflow
        #
        # If Max_Current > Max_Possible_I then
        #     Max_Current_Before_Overflow = MaxPossible_I
        # Else
        #     Max_Current_Before_Overflow = Max_Current
        # End If
        #
        # Max_ShuntVoltage = Max_Current_Before_Overflow * RSHUNT
        # Max_ShuntVoltage = 0.32V
        #
        # If Max_ShuntVoltage >= VSHUNT_MAX
        #     Max_ShuntVoltage_Before_Overflow = VSHUNT_MAX
        # Else
        #     Max_ShuntVoltage_Before_Overflow = Max_ShuntVoltage
        # End If

        # 8. Compute the Maximum Power
        # MaximumPower = Max_Current_Before_Overflow * VBUS_MAX
        # MaximumPower = 3.2 * 32V
        # MaximumPower = 102.4W

        # Set Calibration register to 'Cal' calculated above
        self._write_register(_REG_CALIBRATION, self._cal_value)

        # Set Config register to take into account the settings above
        config = _CONFIG_BVOLTAGERANGE_32V | \
                 _CONFIG_GAIN_8_320MV | \
                 _CONFIG_BADCRES_12BIT | \
                 _CONFIG_SADCRES_12BIT_1S_532US | \
                 _CONFIG_MODE_SANDBVOLT_CONTINUOUS
        self._write_register(_REG_CONFIG, config)

    def shunt_voltage(self):
        """ The shunt voltage (between V+ and V-) in Volts (so +-.327V)
        :param:
            raw_shunt_voltage:int - raw shunt voltage returned
            shunt_voltage_mv:int - shunt voltage
        :return:
            shunt voltage; the least significant bit is 10uV, which is 0.00001 volts
        """
        raw_shunt_voltage = self._read_register(_REG_SHUNTVOLTAGE)
        shunt_voltage_mv = _to_signed(raw_shunt_voltage)
        return shunt_voltage_mv * 0.00001

    def bus_voltage(self):
        """ The bus voltage (between V- and GND) in Volts
        :param:
            raw_voltage:int - raw bus voltage
            voltage_mv:int - bus voltage
        :return:
            bus voltage; the least significant bit is 4mV
        """
        raw_voltage = self._read_register(_REG_BUSVOLTAGE)
        voltage_mv = _to_signed(raw_voltage)
        return voltage_mv * 0.001

    def current_value(self):
        """ current through the shunt resistor in milliamps
        :param:
            raw_current:int - raw current
        :return:
            current in milliamps
        """
        self._write_register(_REG_CALIBRATION, self._cal_value)
        raw_current = self._read_register(_REG_CURRENT)
        raw_current = _to_signed(raw_current)
        return raw_current * self._current_lsb


if __name__ == '__main__':
    i2c = I2cController()
    i2c.set_retry_count(1)
    i2c.configure('ftdi://ftdi:232h:FT2BZGR5/')
    gd = INA219(i2c)
    print("Bus Voltage:   {} V".format(gd.bus_voltage()))
    print("Shunt Voltage: {} mV".format(gd.shunt_voltage() / 1000))
    print("Current:       {} mA".format(gd.current_value()))
```

src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 7,353 · extension: py · filename: ina219.py
num_repo_files: 4 · num_lang_files: 2 · num_lines: 217 · avg_line_length: 32.884793 · max_line_length: 101 · alphanum_fraction: 0.586563 · alpha_fraction: 0.540868 · hex_fraction: 0
rjwen2045458/UoE_CPSLP
18,502,719,131,186
60f800b0ff527818a283388f669fb53a558fea04
4d59e2b420e44d4524002a9dc0a33dbafadfbc69
/CPSLP4.12/wordfreq.py
95b8910194bdfa800bd2642aaf5845092db42f2a
[]
no_license
https://github.com/rjwen2045458/UoE_CPSLP
e458dab42333b3fdd9322900457763c71af1f8d2
220fe3bf8ec35ec800c383129a5e9d97c75298d3
refs/heads/master
2023-02-21T04:30:22.781613
2021-01-28T17:24:06
2021-01-28T17:24:06
333,824,813
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def get_word_frequencies(filename):
    # Empty dictionary for word frequencies
    freqs = {}
    # Read in the file
    with open(filename, 'r') as f:
        for line in f:
            for word in line.split():
                if word in freqs:
                    freqs[word] += 1
                else:
                    freqs[word] = 1
    return freqs
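# --- Hedged usage sketch (not part of the original file; assumes 'example.txt' exists) ---
if __name__ == '__main__':
    freqs = get_word_frequencies('example.txt')
    # Print the five most frequent words, highest count first.
    for word, count in sorted(freqs.items(), key=lambda kv: kv[1], reverse=True)[:5]:
        print(word, count)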
UTF-8
Python
false
false
342
py
26
wordfreq.py
18
0.51462
0.508772
0
13
25.384615
43
Pupation/GalaxyGallery
17,703,855,228,937
da294adc83fbaec4f7b4ba5c0db05bca163061b1
be63aab08119daa41bb0cd18121e2b2ea8815131
/core/misc/chatbox/__init__.py
68795ecbb48d09d94bf63ea65b438dba30d125a1
[]
no_license
https://github.com/Pupation/GalaxyGallery
034f3742dd0e4609846f6a62983b33b234291ecd
109e0916983fe1e1eb21ac7f46d74c0955869663
refs/heads/master
2023-08-26T05:52:19.924253
2021-11-15T01:53:48
2021-11-15T01:53:48
415,851,807
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from fastapi import APIRouter

chatbox_router = APIRouter(
    prefix='/chatbox',
    tags=['chat']
)

from .chatbox import *
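# --- Hedged usage sketch (not part of the original file) ---
# A router like this is typically mounted on the application object
# elsewhere in the project, roughly:
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(chatbox_router)
#
# after which the handlers defined in .chatbox are served under the
# '/chatbox' prefix with the 'chat' tag.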
UTF-8
Python
false
false
128
py
70
__init__.py
66
0.671875
0.671875
0
9
13.111111
29
abiraja2004/tw
6,021,544,197,634
d5b7e5f5f102e85498ed51c8d33e40f268a3ba9f
97e3e5747bd786e04616a3ffd49e12742c0688be
/frontend/gnip/pipeline.py
5414c29949d6c89d1b576adcec46c05a154dad88
[ "Apache-2.0" ]
permissive
https://github.com/abiraja2004/tw
26dc3692392f9b1b94ae06ddff70bec0f92ac21a
7c82278dce578dd73fb6fa465aed8585af224445
refs/heads/master
2020-03-08T09:39:04.428444
2016-01-28T02:28:54
2016-01-28T02:28:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from utils import MyThread
from Queue import Queue, Empty
from pprint import pprint
import traceback


class Pipeline(object):

    class Stage(object):
        def processItem(self, item):
            return item

    class DummyTimedStage(object):
        def __init__(self, t):
            self.t = t

        def processItem(self, item):
            time.sleep(self.t)
            return item

    class StageThread(MyThread):
        def __init__(self, stage, queues):
            MyThread.__init__(self)
            self.stage = stage
            self.source_queue = queues[0]
            self.next_queue = queues[1]
            self.errors_queue = queues[2]
            self.finish_flag = False
            stage.items_processed = 0

        def stopWorking(self):
            self.source_queue.join()
            self.finish_flag = True

        def run(self):
            while not self.finish_flag:
                try:
                    item = self.source_queue.get(True, 1)
                    try:
                        item = self.stage.processItem(item)
                    except Exception, e:
                        error_string = traceback.format_exc()
                        self.errors_queue.put({"item": item, "error": error_string, "stage": self.__class__.__name__})
                        item = None
                        pprint(error_string)
                    self.stage.items_processed += 1
                    self.source_queue.task_done()
                    if item:
                        self.next_queue.put(item)
                except Empty, e:
                    pass

    def __init__(self):
        self.source_queue = Queue()
        self.output_queue = self.source_queue
        self.errors_queue = Queue()
        self.stages = []
        self.stage_queues = {}
        self.stage_threads = {}

    def getSourceQueue(self):
        return self.source_queue

    def appendStage(self, stage):
        self.stages.append(stage)
        if len(self.stages) > 1:  # not the first stage
            self.stage_queues[stage] = (self.stage_queues[self.stages[-2]][1], Queue(), self.errors_queue)
        else:
            self.stage_queues[stage] = (self.source_queue, Queue(), self.errors_queue)
        self.output_queue = self.stage_queues[stage][1]
        self.stage_threads[stage] = Pipeline.StageThread(stage, self.stage_queues[stage])
        self.stage_threads[stage].setName(stage.__class__.__name__)

    def startWorking(self):
        for stage in self.stages:
            self.stage_threads[stage].start()

    def stopWorking(self):
        for stage in self.stages:
            if self.stage_threads[stage].isAlive():
                self.stage_threads[stage].stopWorking()
                self.stage_threads[stage].join()

    """
    def join(self):
        self.source_queue.join()
        for stage in self.stages:
            self.stage_queues[stage][1].join()
    """

    def getStats(self):
        res = {}
        res['Stage Count'] = len(self.stages)
        res['Errors Queue Count'] = self.errors_queue.qsize()
        res['Stages'] = []
        for stage in self.stages:
            d = {}
            d['Stage Class'] = stage.__class__.__name__
            d['Items Processed'] = stage.items_processed
            d['Source Queue count'] = self.stage_queues[stage][0].qsize()
            d['Output Queue count'] = self.stage_queues[stage][1].qsize()
            res['Stages'].append(d)
        res['Output Queue Size'] = self.output_queue.qsize()
        return res
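# --- Hedged usage sketch (not part of the original file; Python 2, matching the module) ---
# A stage only needs a processItem() method, so a minimal pipeline would be:
#
#   class UpperStage(Pipeline.Stage):
#       def processItem(self, item):
#           return item.upper()
#
#   pipeline = Pipeline()
#   pipeline.appendStage(UpperStage())
#   pipeline.startWorking()
#   pipeline.getSourceQueue().put("hello")
#   ...
#   pipeline.stopWorking()       # drains the queues, then joins the stage threads
#   print pipeline.getStats()    # per-stage counters and queue sizes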
UTF-8
Python
false
false
3,619
py
88
pipeline.py
53
0.520586
0.516994
0
104
33.798077
118
filipedc82/tddfirst
3,985,729,676,268
1f4826ac8571b78923c14f016b8b1da48f0556c8
d722d0936bf821ec1580dcf0e1a99ff6f7c17a54
/products/urls.py
252a72019d3e65d7b6336d6f82b5e74a8025fe9f
[]
no_license
https://github.com/filipedc82/tddfirst
5088981168c1c73aa2e0a4386283fefd73f7c1bf
badcd5e9b8dcb327903d75c2d5a20587226123f0
refs/heads/master
2016-09-09T20:30:08.900676
2015-05-07T17:03:25
2015-05-07T17:03:25
31,426,859
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
__author__ = 'Filipe'

from django.conf.urls import patterns, include, url
# from django import views
from .views import home_page, ProductListView

urlpatterns = patterns('',
    # Examples:
    url(r'^$', ProductListView.as_view(), name='product_list'),
)
UTF-8
Python
false
false
274
py
33
urls.py
17
0.656934
0.656934
0
11
22.909091
63
marcospsviana/sysweb-venda-stoq
13,984,413,552,867
d29f7ba1283b9b547cfc0e94ee04810201fa002b
9af423160aee10041045100abde5e66a78304137
/sistema/sysweb.py
1bddd8cf70ac17dd58c6242b70356cdecc55582c
[]
no_license
https://github.com/marcospsviana/sysweb-venda-stoq
e93c872608a18fe2f6eed69914c14d5d6a550110
adc02f3560c54b7beaf88a2f1b30a055243f5c07
refs/heads/master
2018-11-13T03:17:12.806793
2018-10-23T13:20:52
2018-10-23T13:20:52
145,180,161
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager

app = Flask('sistema')
db = SQLAlchemy(app)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
app.config.from_pyfile('config.py')
migrate = Migrate(app, db)

from views import *

if __name__ == '__main__':
    manager.run()
UTF-8
Python
false
false
390
py
15
sysweb.py
6
0.735897
0.735897
0
18
20.611111
49
Sweenpet/mic-tac-toe
14,688,788,165,777
b4fa0e567b6e2a0467b3bd907f0b3af994853048
b242ae576323f4d4c0cb954afd3d64e3b35065ad
/mic_tac_toe/resource_locators/content_type.py
9882ff44e2773e98de2b671b17f4092526e8e37e
[]
no_license
https://github.com/Sweenpet/mic-tac-toe
4c85e8ff1661eeff1f4d0860c411d9b898297600
a225ea3f8ce570b59c1315b2c9a3d4e2a3d70bd8
refs/heads/master
2020-03-27T21:14:15.763763
2018-10-28T21:21:43
2018-10-28T21:21:43
147,127,005
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from enum import Enum


class ContentType(Enum):
    """Document type, XLS, PDF, etc"""
    XLSX = 1
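# --- Hedged usage sketch (not part of the original file) ---
# Enum members carry both a symbolic name and a value:
#   doc_type = ContentType.XLSX
#   doc_type.name    -> 'XLSX'
#   doc_type.value   -> 1
#   ContentType(1)   -> ContentType.XLSX   # lookup by value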
UTF-8
Python
false
false
102
py
26
content_type.py
21
0.627451
0.617647
0
6
15.833333
38
lijinlongweb/index.py
11,081,015,646,356
167b3cc8aad8d3ee1cac37840049df24d37d1af2
8d0456a86ec21bcdcff899ef947b2172d627f318
/indexpy/__init__.py
6cb0c71af7c8c63056d3d5e13557dce6677ca756
[ "Apache-2.0" ]
permissive
https://github.com/lijinlongweb/index.py
6443a2fd5756248ac7ade312936e495c703c60f7
819daf95a6432eac9c037b32234ba0105c80378e
refs/heads/master
2022-04-21T21:30:35.294680
2020-04-25T06:59:36
2020-04-25T06:59:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import logging
import importlib

from .utils import State
from .applications import Index
from .config import here, Config

__all__ = ["logger", "Index", "Config"]

app = Index()

logger = logging.getLogger("indexpy")

# global state
g = State()

sys.path.insert(0, here)

# loading preset functions
importlib.import_module("indexpy.preset")
UTF-8
Python
false
false
350
py
62
__init__.py
44
0.737143
0.734286
0
18
18.444444
41
ddasilva/attendance
8,873,402,458,214
ac05d6006133aa35081c95f41b65ecfaa8ae7346
8ebd4566eade160fa9d6a7edce62eeda287bea3d
/tests.py
803a5350a1e580f661cd4ff0f2a6da92dd2f4534
[]
no_license
https://github.com/ddasilva/attendance
2e664f87027b9fa51ca69e0b9608932c0bf2b347
d4d41e3b13d4d98fc833d3b22a2e51c5e4e699df
refs/heads/master
2020-03-27T11:21:51.044076
2018-08-28T17:25:39
2018-08-28T17:25:39
146,481,811
1
0
null
true
2018-08-28T17:20:30
2018-08-28T17:20:30
2018-08-28T13:36:31
2018-08-28T13:36:30
0
0
0
0
null
false
null
#!/usr/bin/env python3

import os
import re
import unittest


class ReadmeTests(unittest.TestCase):
    """Unit tests for README.md"""

    def test_readme_file_exists(self):
        self.assertTrue(os.path.exists('README.md'))

    def test_readme_matches_regexp(self):
        with open('README.md') as fh:
            contents = fh.read()
        self.assertTrue(
            re.match(
                # raw string so \d is a regex digit class, not a string escape
                r'(.*)It has been \d+ day(s?) since I missed a class(.*)',
                contents
            )
        )


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
574
py
2
tests.py
1
0.533101
0.531359
0
27
20.259259
73
avman44/final-project-stepik-selenium-python
6,768,868,492,222
07adf55a9756e58d292ed7155040b520b4183e21
03adbc8014d33fbb4fef158e9daa0d6383e5235b
/pages/basket_page.py
a94ded1af89d0afea8e99067e7bc11f90b5b25be
[]
no_license
https://github.com/avman44/final-project-stepik-selenium-python
6b66bfd619b2af452ed2a6264c9f15336b143eb9
e0eb7384ec184ec61b0318aec0a8851cf0baf014
refs/heads/master
2023-03-20T03:59:51.237161
2021-03-11T15:25:02
2021-03-11T15:25:02
342,881,524
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from .base_page import BasePage
from .locators import BasketPageLocators


class BasketPage(BasePage):

    def should_be_empty_basket(self):
        self.should_be_no_items_in_basket()
        self.should_be_message_about_basket_is_empty()

    def should_be_no_items_in_basket(self):
        assert self.is_not_element_present(*BasketPageLocators.BASKET_ITEMS), \
            "There is an item in the basket, but should not be"

    def should_be_message_about_basket_is_empty(self):
        assert self.is_element_present(*BasketPageLocators.BASKET_IS_EMPTY_MESSAGE), \
            "Missing message \"basket is empty\""
UTF-8
Python
false
false
621
py
6
basket_page.py
4
0.695652
0.695652
0
15
40.4
86
danhaus/quadcopter-autopilot
1,778,116,473,708
de25c2d6ed8f44482dcf5443d9fbcecd03d836bc
3efbfb5ab2968909c4fde860487d7b75f5d24659
/src/hyper_propelled_cow/scripts/controller_simple
7a4ba8283c767881962b441edcc709ca709c646d
[]
no_license
https://github.com/danhaus/quadcopter-autopilot
83d3ec5e05d1d1c81bfdc1d593452ba53708e0f6
5a096ffbb073fad67673886209cf0574a58cbbb3
refs/heads/master
2021-03-22T00:57:15.914707
2018-08-07T22:37:15
2018-08-07T22:37:15
119,521,702
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python

import rospy
import time
from std_msgs.msg import Int16, Int32

import propelled_cow.PID_control as _PID


class Controller(object):
    """docstring for Controller"""

    def __init__(self):
        self.distance_demand = rospy.get_param('distance_treshold')
        self.PID_vals = rospy.get_param('distance')
        self.controller = _PID.PID(self.PID_vals['Kp'], self.PID_vals['Ki'], self.PID_vals['Kd'], 2, 0)  # THE LAST TWO VALUES NEED TO BE ADJUSTED
        self.throttle_pub = rospy.Publisher('PWM_throttle', Int16, queue_size=2)
        self.servo_pub = rospy.Publisher('servo_angle', Int16, queue_size=10)
        rospy.init_node('controller', anonymous=True)
        self.PWM_limits = rospy.get_param('PWM')
        self.PWM_current = self.PWM_limits['min']

        # Start countdown.
        i = 10
        rospy.logwarn("Take off counter started.")
        while (i > 0):
            rospy.logwarn("Countdown: " + str(i))
            time.sleep(1)
            i = i - 1
        rospy.logwarn("Launched.")
        self.start = time.time()

        rospy.Subscriber('distance_fused', Int16, self.main_control)
        rospy.spin()  # IT MIGHT NOT NEED TO BE HERE

    def main_control(self, data):
        self.distance = data.data
        err = self.distance_demand - self.distance
        PWM_correction = -self.controller.update_PID(err)
        PWM_raw = self.PWM_current + PWM_correction
        self.PWM_current = _PID.saturation(PWM_raw, self.PWM_limits['min'], self.PWM_limits['max'])
        self.throttle_pub.publish(self.PWM_current)

        # drop the bean bag if the drone is higher than 700 mm and the autopilot runs for at least 10 secs
        if (self.distance >= (self.distance_demand - 100)):  # if the drone is higher than 800 - 100 mm
            now = time.time()
            if (now - self.start > 10):  # if the drone is in the air for more than 10 secs
                self.servo_pub.publish(170)


if __name__ == '__main__':
    controller = Controller()
UTF-8
Python
false
false
1,804
47
controller_simple
27
0.689024
0.666297
0
52
33.692308
140
ggonzaleze/Programming101
14,328,010,936,144
a91d37423b018b260c7a919558e993d2ce289a09
ddf709da01a4a4f53e547de9671c159d06385250
/MaxInList.py
cc5bfd7757fcb631865b17b1555ab3f57d7f0e2d
[]
no_license
https://github.com/ggonzaleze/Programming101
53ef2bb76af181d3193f10222c472198279623e8
025de1ccd278aabf71deb1a22135c208304bcb69
refs/heads/master
2021-05-14T23:44:01.939521
2017-12-06T04:30:07
2017-12-06T04:30:07
104,280,422
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from functools import reduce


def max_in_list(numbers):
    return reduce(lambda x, y: x if x > y or x == y else y, numbers)


numbers = [-5, 10, 6, 0]
print("The maximum number is: ", max_in_list(numbers))
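# --- Hedged note (not part of the original file) ---
# The lambda is a verbose spelling of `x if x >= y else y`, so the reduce
# folds the list down to its largest element; the builtin gives the same
# result: max([-5, 10, 6, 0]) == max_in_list([-5, 10, 6, 0]) == 10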
UTF-8
Python
false
false
210
py
59
MaxInList.py
56
0.647619
0.62381
0
7
28
69
umedoblock/fugou
13,993,003,452,044
3b584d902a186169b2ea4dfa2b20b586c2468ff1
ee87e89befa0d4bf353dcf682b6467f9daaf657e
/src/foo/baz.py
143417c8ba0618e5834d06180764cc3e2e4d4246
[ "BSD-3-Clause", "MIT" ]
permissive
https://github.com/umedoblock/fugou
43046056ce5f20b81d76e3c8e3149717b63708ed
45d95f20bba6f85764fb686081098d92fc8cdb20
refs/heads/master
2021-07-15T15:26:30.856753
2018-11-26T23:44:18
2018-11-26T23:44:18
152,105,228
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from abc import ABCMeta, abstractmethod, abstractproperty


# class Baz_abstract(metaclass=ABCMeta):
class Baz_abstract(object):
    def __init__(self):
        self.v_abstract = 8
        self.p_abstract = 9
        dic = self.__dict__
        print('Baz_abstract id(__dict__) = {:x}'.format(id(dic)))

print('Baz_abstract.__dict__ =', Baz_abstract.__dict__)
print()


class Baz_base(Baz_abstract):
    '''help for Baz_base'''

    def __init__(self):
        super().__init__()
        self.v_base = 8
        self.p_base = 9
        dic = self.__dict__
        print('Baz_base id(__dict__) = {:x}'.format(id(dic)))

print('Baz_base.__dict__ =', Baz_base.__dict__)
print()


class Baz(Baz_base):
    '''help for Baz'''

    def __init__(self):
        super().__init__()
        self.v_baz = 8
        self.p_baz = 9
        dic = self.__dict__
        print('Baz id(__dict__) = {:x}'.format(id(dic)))
        # del self.v

print('         Baz.__dict__ =', Baz.__dict__)
print()

baz = Baz()
dic = baz.__dict__
print('baz id(__dict__) = {:x}'.format(id(dic)))
dir_ = dir(baz)
for d in dir(baz):
    pass
    # print('{:>16}'.format(d), getattr(baz, d))
print(dic)
print('dir_ =', type(dir_))
dic = baz.__dict__
print('baz id(__dict__) = {:x}'.format(id(dic)))
UTF-8
Python
false
false
1,297
py
279
baz.py
67
0.509638
0.50347
0
47
26.595745
65
LucasYang-JLL/horizon_2080_fullstack
9,156,870,276,642
e95ddd1bbb94a1dc22dc4fe6365c7e6d6fa1fa4a
d4ac7a8e5844df254957c1a52170173a765a9b69
/horizon_2080/users/views.py
c3eca38e65af41d88166084b55d94cf27c1de362
[]
no_license
https://github.com/LucasYang-JLL/horizon_2080_fullstack
cbd417e325d0542a74934ad8e6b30d14be0c66c3
888c10013b473e0312a0847a80f4088214619cf2
refs/heads/master
2021-07-08T16:45:47.528947
2019-04-17T06:42:00
2019-04-17T06:42:00
153,553,962
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.contrib.auth import logout
from rest_framework import generics


def Logout(request):
    logout(request)
    # Redirect to a success page.
    return HttpResponseRedirect("/landing")


def UserQuery(request):
    userID = request.user.name  # my user id
    # Return json response
    return JsonResponse({"user": request.user.name})


def UserAndSubsetQuery(request):
    userID = request.user.name  # my user id
    userArr = request.user.report_to_me.all()  # the users that report to me
    nameList = []
    for user in userArr:
        nameList.append({"name": user.name, "department": user.department})
    return JsonResponse({"userList": nameList})


def ActionAccess(request):
    actionAccessArr = request.user.own_action_from.all()
    nameList = []
    for user in actionAccessArr:
        nameList.append({"name": user.name, "department": user.department})
    return JsonResponse({"userList": nameList})
UTF-8
Python
false
false
1,006
py
162
views.py
147
0.7167
0.7167
0
29
33.724138
75
yash-analytics/HackerRank_Solutions
8,435,315,801,386
0fa40cacf5cc0e528353583b82984ec4929e1e4c
0685e1ae8e50f9211ca7675b73633757c188bb80
/10_Days_of_Statistics/Day 1/Quartiles.py
5e8400c1017ade8b3f5a631a9b037b45b12dd31f
[]
no_license
https://github.com/yash-analytics/HackerRank_Solutions
48f398cee065cb4c4d54efdec82e1bbee59c4e5b
5dbd2b65f80414354c9d027aab054582f9f1b9f8
refs/heads/master
2023-08-25T02:08:59.483089
2020-05-27T17:19:29
2020-05-27T17:19:29
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Getting the input
no_of_elements = int(input())
elements = list(map(int, input().split()))
elements.sort()


# Defining Median
def median(my_list):
    length = len(my_list)
    if (length % 2 == 0):
        # average the two middle values
        return int((my_list[length // 2 - 1] + my_list[length // 2]) / 2)
    else:
        return int(my_list[length // 2])


# Separating case for odd and even
if (no_of_elements % 2 == 0):
    print(median(elements[0:no_of_elements // 2]))
    print(median(elements))
    print(median(elements[no_of_elements // 2:]))
else:
    print(median(elements[0:(no_of_elements - 1) // 2]))
    print(median(elements))
    print(median(elements[(no_of_elements + 1) // 2:]))
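# --- Hedged worked example (not part of the original file) ---
# For the sample input of 9 elements: 3 7 8 5 12 14 21 13 18
# sorted -> [3, 5, 7, 8, 12, 13, 14, 18, 21]
# lower half [3, 5, 7, 8]     -> Q1 = (5 + 7) / 2   = 6
# full list (odd length)      -> Q2 = 12
# upper half [13, 14, 18, 21] -> Q3 = (14 + 18) / 2 = 16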
UTF-8
Python
false
false
668
py
40
Quartiles.py
36
0.607784
0.582335
0
26
24.730769
70
spolichnowski/Blog-1
8,203,387,535,482
104268e1551be99ffcd7d0d78ad1f9a87ebb9171
02736f7d0419462ab8b336394645640e99e00c5a
/articles/forms.py
b785d20333548132ce5f2781d838aa728ca7ffdf
[]
no_license
https://github.com/spolichnowski/Blog-1
a5016d002de2f2a80cad1f0de6ecb3b84fb8139b
a735e682841909592b188a4b45f1114a3938a06c
refs/heads/master
2021-01-19T00:03:23.425483
2016-11-05T10:37:24
2016-11-05T10:37:24
72,918,688
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import forms


class EmailMessageForm(forms.Form):
    name = forms.CharField(max_length=70)
    from_email = forms.EmailField()
    message = forms.CharField(required=True, widget=forms.Textarea)
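# --- Hedged usage sketch (not part of the original file) ---
# Typical validation flow inside a Django view:
#   form = EmailMessageForm(request.POST)
#   if form.is_valid():
#       name = form.cleaned_data['name']
#       from_email = form.cleaned_data['from_email']
#       message = form.cleaned_data['message']
#       # ...send the email...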
UTF-8
Python
false
false
239
py
6
forms.py
3
0.65272
0.644351
0
8
28.875
52
linbaiwpi/RoadNet-RT
18,846,316,525,750
1fe0c9e8b8b3bf2e0ebc2dca653fe31d523d85df
f24c5cd5957473f497f4089bc9ff9a72b5814a68
/roadnet/net.py
b341d6af7d8be0892790aade4981d3436202cdc5
[ "MIT" ]
permissive
https://github.com/linbaiwpi/RoadNet-RT
d7faec43475300b94a185d874532af0a1e284ab0
624a1051e0502b60abe6122450ea53f80e9e4f8a
refs/heads/main
2023-04-19T02:47:32.829009
2021-05-11T20:54:07
2021-05-11T20:54:07
310,941,805
12
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import keras as K
import keras.layers as L
import keras.models as M
import tensorflow as tf


def resnetLayer(x_in, filters, strides, name):
    # main branch
    x = L.Conv2D(filters=filters, kernel_size=3, strides=strides, padding="same", name=name+"_conv1")(x_in)
    x = L.BatchNormalization(name=name+"_bn1")(x)
    x = L.ReLU(name=name+"_relu1")(x)
    x = L.Conv2D(filters=filters, kernel_size=3, strides=1, padding="same", name=name+"_conv2")(x)
    x = L.BatchNormalization(name=name+"_bn2")(x)
    # shortcut
    x_sc = L.Conv2D(filters=filters, kernel_size=3, strides=strides, padding="same", name=name+"_conv_sc")(x_in)
    x_sc = L.BatchNormalization(name=name+"_bn_sc")(x_sc)
    # add
    x = L.add([x, x_sc])
    x = L.ReLU(name=name+"_relu2")(x)
    return x


def ConvBNReLU(x, filter=64, kernel_size=3, strides=1, use_bias=True, name="ConvBNReLU"):
    x = L.Conv2D(filters=filter, kernel_size=kernel_size, strides=(strides, strides), padding="same",
                 use_bias=use_bias, name=name+"_conv")(x)
    x = L.BatchNormalization(name=name+"_bn")(x)
    x = L.ReLU(name=name+"_relu")(x)
    return x


def atrousConvBNReLU(x, filter=64, kernel_size=3, strides=1, dilation_rate=2, use_bias=True, name="aConvBNReLU"):
    x = L.Conv2D(filters=filter, kernel_size=kernel_size, strides=(strides, strides),
                 dilation_rate=(dilation_rate, dilation_rate), padding="same",
                 use_bias=use_bias, name=name+"_conv")(x)
    x = L.BatchNormalization(name=name+"_bn")(x)
    x = L.ReLU(name=name+"_relu")(x)
    return x


def AttentionRefinement(x, out_channel=64):
    x = ConvBNReLU(x, out_channel, 3, 1, use_bias=False, name="ARM")
    x_sc = L.GlobalAveragePooling2D(data_format='channels_last', name="avg_pool")(x)
    x_sc = L.Reshape((1, 1, out_channel))(x_sc)
    x_sc = L.Conv2D(out_channel, 1, strides=1, padding="same", name="ARM_1x1")(x_sc)
    x_sc = L.Activation('sigmoid')(x_sc)
    x = L.multiply([x_sc, x])
    return x


def FeatureFusion(sp_out, cp_out):
    ffm_cat = L.concatenate([sp_out, cp_out])
    ffm_conv = ConvBNReLU(ffm_cat, 64, 1, 1, use_bias=False, name="ffm_conv1")
    ffm_cam = L.GlobalAveragePooling2D(data_format='channels_last')(ffm_conv)
    ffm_cam = L.Reshape((1, 1, 64))(ffm_cam)
    ffm_cam = L.Conv2D(filters=64, kernel_size=1, strides=1, padding="same", use_bias=False, name="ffm_conv2")(ffm_cam)
    ffm_cam = L.ReLU()(ffm_cam)
    ffm_cam = L.Conv2D(filters=64, kernel_size=1, strides=1, padding="same", use_bias=False, name="ffm_conv3")(ffm_cam)
    ffm_cam = L.Activation('sigmoid')(ffm_cam)
    ffm_cam = L.multiply([ffm_conv, ffm_cam])
    ffm_cam = L.add([ffm_cam, ffm_conv])
    return ffm_cam


class roadnet_rt():
    def __init__(self, input_shape=(160, 600), num_class=2, activation='sigmoid'):
        self.input_shape = input_shape
        self.num_class = num_class
        self.activation = activation

    def build(self):
        x_in = L.Input(shape=(self.input_shape[0], self.input_shape[1], 3))
        # spatial path
        convBnRelu = ConvBNReLU(x_in, 64, 7, 2, use_bias=False, name="convBnRelu")
        convBnRelu_1 = ConvBNReLU(convBnRelu, 64, 3, 2, use_bias=False, name="convBnRelu1")
        convBnRelu_2 = ConvBNReLU(convBnRelu_1, 64, 3, 2, use_bias=False, name="convBnRelu2")
        sp_out = ConvBNReLU(convBnRelu_2, 128, 1, 1, use_bias=False, name="convBnRelu3")
        # context path
        #x_div_2 = L.Lambda(lambda image: K.backend.resize_images(image, 0.5, 0.5, 'channels_last', 'bilinear'))(x_in)
        # x_div_2 = L.Lambda(lambda image: tf.image.resize_images(image, (int(self.input_shape[0]/2), int(self.input_shape[1]/2))))(x_in)
        x_div_2 = L.Lambda(lambda image: tf.image.resize(image, (int(self.input_shape[0]/2), int(self.input_shape[1]/2))))(x_in)
        cp_x = L.Conv2D(filters=64, kernel_size=3, strides=1, padding="same", use_bias=False,
                        name='cp_conv')(x_div_2)
        cp_x = resnetLayer(cp_x, 64, 2, "backbone_1")
        cp_x = resnetLayer(cp_x, 128, 2, "backbone_2")
        cp_x_1 = atrousConvBNReLU(cp_x, 32, 3, 1, 2, use_bias=False, name="aconv1")
        cp_x_2 = atrousConvBNReLU(cp_x, 32, 3, 1, 4, use_bias=False, name="aconv2")
        cp_x_3 = atrousConvBNReLU(cp_x, 32, 3, 1, 8, use_bias=False, name="aconv3")
        cp_x_4 = atrousConvBNReLU(cp_x, 32, 3, 1, 16, use_bias=False, name="aconv4")
        cp_x = L.concatenate([cp_x_1, cp_x_2, cp_x_3, cp_x_4])
        cp_arm = AttentionRefinement(cp_x, 64)
        cp_out = L.concatenate([cp_arm, cp_x])
        # fusion
        ffm_cam = FeatureFusion(sp_out, cp_out)
        ffm_cam = L.Conv2D(filters=self.num_class, kernel_size=3, strides=1, padding="same", use_bias=False,
                           name="output")(ffm_cam)
        x_out = L.Activation(self.activation)(ffm_cam)
        x_out = L.UpSampling2D(size=8, interpolation='bilinear', name="upsample")(x_out)
        return M.Model(inputs=x_in, outputs=x_out)


# based on BiSeNet_lite, replace the AvePooling to GlobalAvgPooling2D
# move AttentionRefinement to context path
# first conv kernel size = 7
if __name__ == '__main__':
    model = roadnet_rt((280, 960), 1).build()
    model.summary()
UTF-8
Python
false
false
5,141
py
15
net.py
12
0.634896
0.601245
0
106
47.5
137
hhlgithub/Gaiter
4,655,744,549,679
803e2932d9bac3966899797ca4c54ffc85745f7a
f44cee6652d0699a4426a6a3243ae35d80292c43
/personal_classifier/ada_boost.py
e37bce4493e996d85fcf9a7b80bffdd39b409e58
[ "MIT" ]
permissive
https://github.com/hhlgithub/Gaiter
b89db580d817fa049e30e434390863f5d5e419be
a456ab59b67ed9eaf4896b423c0b09589f28f19f
refs/heads/master
2017-05-09T11:46:12.399447
2014-12-16T13:03:31
2014-12-16T13:03:31
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from sklearn.ensemble import AdaBoostClassifier

from personal_classifier import PersonalClassifier


class AdaBoost(PersonalClassifier):
    def __init__(self, data_set, labels):
        super(AdaBoost, self).__init__(data_set, labels)

    def train(self):
        x = self.data_set
        y = self.labels
        clf = AdaBoostClassifier()
        clf.fit(x, y)
        self.set_classifier(clf)
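# --- Hedged usage sketch (not part of the original file) ---
# Assuming PersonalClassifier stores data_set/labels in __init__ and
# exposes set_classifier(), a toy run would look like:
#   model = AdaBoost([[0, 0], [1, 1], [2, 2]], [0, 1, 1])
#   model.train()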
UTF-8
Python
false
false
399
py
28
ada_boost.py
27
0.656642
0.656642
0
16
24
56
SyedUmaidAhmed/Buzzers-and-PIR-Sensor-All-with-Raspberry-Pi-
1,288,490,191,313
da360595b8885c64b33f5065c31875a988387038
53d6b530227605b007f3a62356e2667a9c897193
/bz_2.py
c9be7165c657070a72e9e52e8a5d9f5b00a5f1ff
[]
no_license
https://github.com/SyedUmaidAhmed/Buzzers-and-PIR-Sensor-All-with-Raspberry-Pi-
b139b938369a523fc1a1436a47d8eaea4484e664
6a6ed9be04c6f5cbe3ff7ee35364f1381fecb6de
refs/heads/main
2023-03-18T20:14:05.561090
2021-02-27T21:06:54
2021-02-27T21:06:54
342,958,242
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

BUZZER = 23
buzzState = False

GPIO.setup(BUZZER, GPIO.OUT)

while True:
    buzzState = not buzzState
    GPIO.output(BUZZER, buzzState)
    time.sleep(1)
UTF-8
Python
false
false
237
py
9
bz_2.py
8
0.742616
0.729958
0
13
17.307692
34
umarhussain88/rhls_pdf_generator
9,165,460,248,354
aa9f1bb5a4ccd6034768c0e081fac1bcfc1463e8
2b2bd49738c04db885f9b0ebed4cb7b1ede05be8
/students/migrations/0001_initial.py
7517ade3f117d2a78004f057c1b691c005dfa588
[]
no_license
https://github.com/umarhussain88/rhls_pdf_generator
62857ebcd39ec0df32b7f9ae2db2f264b550cfc1
86cbcaab6f4960320f354783c3d5a5947e50f583
refs/heads/main
2023-07-17T14:11:42.348488
2021-09-05T22:26:27
2021-09-05T22:26:27
399,273,471
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 3.2.6 on 2021-08-25 22:12

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('student_id', models.IntegerField()),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('school_name', models.CharField(max_length=255)),
                ('subject_name', models.CharField(max_length=255)),
                ('time_from', models.CharField(max_length=255)),
                ('time_to', models.CharField(max_length=255)),
                ('parents_email', models.CharField(max_length=255)),
            ],
        ),
    ]
UTF-8
Python
false
false
948
py
16
0001_initial.py
10
0.556962
0.518987
0
28
32.857143
117
tetianakh/playground
10,737,418,271,493
918394a01d86228450d32963b0672b795802be09
d2642b02511811a6e85f8c004ac72765001b7459
/django-test/test_project/user_generator.py
e7952ae11a4c0dfe2b50278791b99f79090ef9b0
[]
no_license
https://github.com/tetianakh/playground
58920751d647b14e4409e125a3221ce106fe9b00
282959bc2f6bb7782192c68e6ceff227ff64a5fc
refs/heads/master
2021-09-06T20:12:45.360806
2018-02-10T22:34:09
2018-02-10T22:34:09
117,529,602
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from faker import Faker
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')
import django
django.setup()

from users.models import User

fake = Faker()


def generate_users(N=10):
    for _ in range(N):
        user = User(
            first_name=fake.first_name(),
            last_name=fake.last_name(),
            email=fake.email()
        )
        user.save()


if __name__ == '__main__':
    print("Generating some fake users...")
    generate_users(20)
    print("Done!")
UTF-8
Python
false
false
523
py
26
user_generator.py
15
0.586998
0.57935
0
28
17.678571
72
tastypenguinbacon/just-messing-arround
7,997,229,124,401
5af3982ddee7df965abf44e2046d95fd7686fc42
e001481b356cdf23ed1a070681ab5385ddc83b39
/algo/fractal/python-mandelbrot-inverse.py
18b399fe59fdd5f2b851f45ba74be6fdb580c537
[]
no_license
https://github.com/tastypenguinbacon/just-messing-arround
5b276b614ad5be4c7b6dc4c97eb551d1c0da1057
12f3efd1c003f550934465bf9c57966479cd4707
refs/heads/master
2022-12-24T05:47:47.634229
2019-11-30T17:19:50
2019-11-30T17:19:50
82,855,657
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-1.5, 4.5, 2000)
y = np.linspace(2, -2, 1500)
c = np.repeat(np.array([x]), y.size, axis=0) + np.repeat(np.array([y]), x.size, axis=0).T * 1j
c = 1 / c
z = np.zeros(c.shape, dtype=complex)  # the builtin complex type; np.complex is no longer available
img = np.zeros(c.shape)
for i in range(128):
    z = z ** 2 + c
    img[np.absolute(z) < 2] += 1

plt.imshow(np.sqrt(img))
plt.show()
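# --- Hedged note (not part of the original file) ---
# c -> 1/c is a circle inversion of the parameter plane, so the escape-time
# counts accumulated in `img` render the Mandelbrot set "inside out";
# np.sqrt(img) only compresses the dynamic range before display.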
UTF-8
Python
false
false
396
py
42
python-mandelbrot-inverse.py
30
0.606061
0.545455
0
17
22.235294
94
instance01/py-S-MCTS
4,466,766,021,323
dfe5a99b420cb890b96e538dbd3026f0f7e59f74
95cd30d17d99b3827ed926ff5b1e09077e61d729
/show_profile.py
b15a40b9f8cc34703c976be0f3526ef3c3ba8565
[ "MIT" ]
permissive
https://github.com/instance01/py-S-MCTS
12d3d47541932ed426bf22901603ff6704385b65
cad67da8bb515126311040674d5e6da77f47c90f
refs/heads/master
2021-03-13T19:25:00.826462
2020-04-16T20:03:22
2020-04-16T20:03:22
246,704,443
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pstats

name = '3.profile'
out = pstats.Stats(name)
print('sort by cumulative')
out.sort_stats('cumulative').print_stats(20)
print('sort by total time')
out.sort_stats('time').print_stats(20)
UTF-8
Python
false
false
201
py
13
show_profile.py
12
0.721393
0.696517
0
10
19.1
44
ngglasgow/clamp_ephys
867,583,395,330
db6f9a9f6b5eb8d186a6b2c106007d2fca786852
2d64c81dad73bd9f03288b9004b589ac5c90d555
/single_cell_test_analysis.py
48ad8d4c7723822307e512d20cb8c1cc02352ab4
[ "MIT" ]
permissive
https://github.com/ngglasgow/clamp_ephys
11fa657b4f746b9adb9046fc68ae976e4b0a4efa
60f1f490837939a0afcf9cf5bdd123fb51546269
refs/heads/master
2022-11-04T23:02:50.220537
2020-05-26T15:30:29
2020-05-26T15:30:29
223,270,172
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import clamp_ephys
import pandas as pd
import os
import scipy
import matplotlib
import matplotlib.pyplot as plt

'''####################### SET THE PROPER PATH YOU WANT ########################### '''
paths = clamp_ephys.workflows.file_structure('local', 'Injected_GC_data/VC_pairs')
tables = paths.tables
figures = paths.figures

'''####################### SET PROPER PATH_TO_DATA_NOTES ########################## '''
data_path = os.path.join(os.getcwd(), 'test_data', 'p2_data_notes.csv')

''' ################### SET/CHECK THESE PARAMETERS BEFORE RUNNING ################## '''
lowpass_freq = 500    # Hz
stim_time = 500       # ms
post_stim = 250       # ms, amount of time after stimulus to look for max value
tp_start = 5          # ms, time of start of test pulse
vm_jump = 10          # mV, test pulse voltage jump
pre_tp = 3            # ms, amount of time before test pulse start to get baseline
unit_scaler = -12     # unitless, scaler to get back to A, from pA
amp_factor = 1        # scaler for making plots in pA
fs = 25               # kHz, the sampling frequency

'''#################### THIS LOOP RUNS THE SINGLE CELL ANALYSIS #################### '''
cell_path = os.path.join(os.getcwd(), 'test_data', 'JH200303_c1_light100.ibw')
data = clamp_ephys.workflows.cell(cell_path, fs=fs, path_to_data_notes=data_path, timepoint='p2', amp_factor=amp_factor, drop_sweeps=True)

data.get_raw_peaks(stim_time, post_stim)
data.filter_traces(lowpass_freq)

# collect peak amplitudes and time to peak of all the max peaks
peaks_max = data.get_filtered_peaks(stim_time, post_stim)
time_to_peak_max = (data.peaks_filtered_indices / fs) - stim_time


def plot_half_width(trace, data):
    '''plots a trace with the peak identified and the half width drawn
    trace: int
        index of the trace you want to see
    data: data object
        needs to be a data object
    '''
    x = data.traces_filtered[trace]
    peak = data.peaks_filtered_indices[trace]
    hw_data = data.get_fwhm_peak_max().iloc[trace, 1:].values

    fig, axs = plt.subplots()
    axs.plot(x)
    axs.plot(peak, x[peak], 'x')
    axs.hlines(hw_data[0] * -1, hw_data[1], hw_data[2], color='r')

    return fig

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# test new class function; collect FWHMs of all the peak IPSCs
halfwidths_peak_max = data.get_fwhm_peak_max()['Max peak half-width (ms)']

# pick an example trace to plot the actual half width of a given peak
%matplotlib widget
fig = plot_half_width(1, data)

import numpy as np  # we will use this later, so import it now
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
output_notebook()

trace = data.traces.iloc[:, 0].values
sd = trace[3400 * data.fs:3900 * data.fs].std()
time = np.arange(0, len(trace) / data.fs, 1 / data.fs)
peaks, properties = scipy.signal.find_peaks(trace * -1, prominence=30)
time_peaks = (peaks / data.fs)

wavelet = scipy.signal.ricker(40, 4)
cwt = scipy.signal.convolve(trace, wavelet)  # compute the convolution before building its time axis
ctime = np.arange(0, len(cwt) / data.fs, 1 / data.fs)

p = figure(plot_width=800, plot_height=400)
p.line(time, trace, line_width=2)
p.circle(time_peaks, trace[peaks], size=5, line_color="navy", fill_color="orange", fill_alpha=0.5)
p.line(ctime, cwt, color='red')
show(p)

drop_path = '/home/nate/urban/clamp_ephys/test_data/dropped_sweeps.csv'
dropped_sweeps = pd.read_csv(drop_path, index_col=[0])
if data.filename in dropped_sweeps.index:
    strsweeps = dropped_sweeps.loc[data.filename].values[0][1:-1].split(', ')
    drop_sweeps = [int(sweep) for sweep in strsweeps]

drop_sweeps
show(p)
UTF-8
Python
false
false
3,641
py
31
single_cell_test_analysis.py
22
0.63911
0.620159
0
103
34.359223
138
nagyist/ParaView
14,233,521,636,152
43d43111efc48d7f08f09873fdbcf4546275888b
09d1138225f295ec2e5f3e700b44acedcf73f383
/Examples/PythonClient/eventdriver.py
487353c3a71be4cabc3b3f8ce3066042378b3147
[ "BSD-3-Clause" ]
permissive
https://github.com/nagyist/ParaView
e86d1ed88a805aecb13f707684103e43d5f6b09f
6810d701c44b2097baace5ad2c05f81c6d0fd310
refs/heads/master
2023-09-04T07:34:57.251637
2023-09-03T00:34:36
2023-09-03T00:34:57
85,244,343
0
0
BSD-3-Clause
true
2023-09-11T15:57:25
2017-03-16T21:44:59
2022-12-16T20:27:24
2023-09-11T15:57:22
238,433
0
0
0
C++
false
false
from PyQt4 import QtCore, QtNetwork, QtGui


def exit():
    import sys
    sys.exit(0)


def sendMessage():
    global sock, timer
    bl = QtCore.QByteArray()
    out = QtCore.QDataStream(bl, QtCore.QIODevice.WriteOnly)
    out.writeDouble(10)
    sock.write(bl)
    timer.start(30)


app = QtGui.QApplication(['Event Driver'])

sock = QtNetwork.QTcpSocket()
sock.connectToHost("localhost", 12345)
sock.disconnected.connect(exit)

timer = QtCore.QTimer()
timer.setSingleShot(True)
timer.timeout.connect(sendMessage)
timer.start(30)

app.exec_()
UTF-8
Python
false
false
548
py
5,743
eventdriver.py
3,836
0.713504
0.689781
0
30
17.266667
60
juditacs/deep-morphology
7,232,724,975,762
270d956e12dddd9cd9603d5a3e1c372ce7811ae3
d36982d5629d31063ffc2bb87ce558316645d6f6
/deep_morphology/probe.py
93bc3770930e9e9f0a8e8f14387d9c2978fa2fda
[ "MIT" ]
permissive
https://github.com/juditacs/deep-morphology
7ec32c671ce88b291ae04021f7a4e5df8a9649a6
090c17e604499a3430ea835a6340fa3abdc6ea83
refs/heads/master
2021-05-26T08:08:07.629377
2020-11-26T15:34:20
2020-11-26T15:34:20
128,024,350
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Judit Acs <judit@sch.bme.hu>
#
# Distributed under terms of the MIT license.

from argparse import ArgumentParser
import os
import gzip
import logging
import platform

import numpy as np
import torch
import torch.nn as nn

from deep_morphology.config import Config, InferenceConfig
from deep_morphology import data as data_module
from deep_morphology import models as model_module
from deep_morphology.utils import find_last_model
from deep_morphology.data.base_data import Vocab
from deep_morphology.models.mlp import MLP
from deep_morphology.models.base import BaseModel
from deep_morphology.experiment import Result

use_cuda = torch.cuda.is_available()


def to_cuda(var):
    if use_cuda:
        return var.cuda()
    return var


def parse_args():
    p = ArgumentParser()
    p.add_argument("-c", "--config", type=str,
                   help="YAML config file location")
    p.add_argument("--encoder", type=str, default=None)
    p.add_argument("--embedding", type=str, default=None)
    p.add_argument("--train-file", type=str, default=None)
    p.add_argument("--dev-file", type=str, default=None)
    p.add_argument("--debug", action="store_true",
                   help="Do not raise exception when the working "
                        "directory is not clean.")
    return p.parse_args()


class DataWrapper:
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels
        self.tgt_field_idx = 1

    def batched_iter(self, batch_size):
        starts = list(range(0, len(self.data), batch_size))
        np.random.shuffle(starts)
        for start in starts:
            end = start + batch_size
            data = self.data[start:end]
            labels = self.labels[start:end]
            yield (data, labels)


class EmbeddingWrapper(nn.Module):
    def __init__(self, train_data, dev_data, embedding_fn):
        super().__init__()
        self.train_data = []
        self.train_labels = []
        self.dev_data = []
        self.dev_labels = []
        self.vocab_label = {}
        with open(train_data) as f:
            for line in f:
                word, label = line.strip().split("\t")
                lab_idx = self.vocab_label.setdefault(label, len(self.vocab_label))
                self.train_data.append(word)
                self.train_labels.append(lab_idx)
        with open(dev_data) as f:
            for line in f:
                word, label = line.strip().split("\t")
                lab_idx = self.vocab_label.setdefault(label, len(self.vocab_label))
                self.dev_data.append(word)
                self.dev_labels.append(lab_idx)
        if embedding_fn.endswith('.gz'):
            with gzip.open(embedding_fn, "rt") as f:
                self.load_embedding(f)
        else:
            with open(embedding_fn) as f:
                self.load_embedding(f)
        prev = len(self.train_data)
        self.train_data, self.train_labels = self.filter_data(self.train_data, self.train_labels)
        self.dev_data, self.dev_labels = self.filter_data(self.dev_data, self.dev_labels)
        self.hidden_size = self.embedding.shape[1]
        self.embedding_layer = nn.Embedding(len(self.embedding_map), self.hidden_size)
        self.embedding_layer.weight = nn.Parameter(torch.from_numpy(self.embedding).float())
        self.dropout = nn.Dropout(0.5)

    def get_train_data_wrapper(self):
        return DataWrapper(self.train_data, self.train_labels)

    def get_dev_data_wrapper(self):
        return DataWrapper(self.dev_data, self.dev_labels)

    def filter_data(self, data, labels):
        filt_data = []
        filt_labels = []
        for d, l in zip(data, labels):
            if d in self.embedding_map:
                filt_data.append(self.embedding_map[d])
                filt_labels.append(l)
        return filt_data, filt_labels

    def load_embedding(self, stream):
        self.embedding_map = {}
        vocab = set(self.train_data) | set(self.dev_data)
        embedding = []
        # first line, may or may not be the dimension info
        fd = next(stream).rstrip("\n").split(" ")
        if len(fd) > 2:
            word = fd[0]
            if word in vocab:
                vec = list(map(float, fd[1:]))
                embedding.append(vec)
                self.embedding_map.setdefault(word, len(self.embedding_map))
        for line in stream:
            fd = line.rstrip("\n").split(" ")
            word = fd[0]
            if word in vocab:
                vec = list(map(float, fd[1:]))
                embedding.append(vec)
                self.embedding_map.setdefault(word, len(self.embedding_map))
        self.embedding = np.array(embedding)

    def forward(self, data):
        return self.dropout(self.embedding_layer(data))


class Prober(BaseModel):
    def __init__(self, config, train_data, dev_data, encoder):
        super().__init__(config)
        # TODO make sure it's a deep-morphology experiment dir
        self.config = Config.from_yaml(config)
        self.config.train_file = train_data
        self.config.dev_file = dev_data
        enc_cfg = InferenceConfig.from_yaml(
            os.path.join(encoder, "config.yaml"))
        self.update_config(enc_cfg)
        self.data_class = getattr(data_module, self.config.dataset_class)
        self.train_data = self.data_class(self.config, train_data)
        self.dev_data = self.data_class(self.config, dev_data)
        self.encoder = self.load_encoder(enc_cfg)
        self.relabel_target()
        self.train_data.save_vocabs()
        # no trailing comma here: output_size must be an int, not a tuple
        self.output_size = len(self.train_data.vocabs.tgt)
        self.mlp = self.create_classifier()
        # fix encoder
        self.criterion = nn.CrossEntropyLoss()
        self.result = Result()

    def create_classifier(self):
        # BiLSTM
        enc_size = 2 * self.encoder.hidden_size
        # SOPA
        if hasattr(self.encoder, 'sopa'):
            enc_size = self.encoder.hidden_size
        return MLP(
            input_size=enc_size,
            layers=self.config.mlp_layers,
            nonlinearity=self.config.mlp_nonlinearity,
            output_size=self.output_size,
        )

    def load_encoder(self, enc_cfg):
        model_class = getattr(model_module, enc_cfg.model)
        self.encoder = model_class(enc_cfg, self.dev_data)
        model_file = find_last_model(enc_cfg.experiment_dir)
        self.encoder.load_state_dict(torch.load(model_file))
        if getattr(self.config, 'train_encoder', False) is False:
            for param in self.encoder.parameters():
                param.requires_grad = False
        return self.encoder.encoder

    def update_config(self, encoder_cfg):
        enc_dir = encoder_cfg.experiment_dir
        self.config.encoder_dir = enc_dir
        for fn in os.scandir(enc_dir):
            if fn.name.startswith("vocab"):
                setattr(self.config, fn.name, fn.path)

    def relabel_target(self):
        vocab = Vocab(frozen=False, constants=[])
        labels = []
        for raw in self.train_data.raw:
            labels.append(vocab[raw.tgt])
        self.train_data.mtx.tgt = labels
        self.train_data.vocabs.tgt = vocab
        vocab.frozen = True
        labels = []
        for raw in self.dev_data.raw:
            labels.append(vocab[raw.tgt])
        self.dev_data.mtx.tgt = labels
        self.dev_data.vocabs.tgt = vocab
        self.train_data.to_idx()
        self.dev_data.to_idx()

    def compute_loss(self, target, output):
        target = to_cuda(torch.LongTensor(target.tgt)).view(-1)
        loss = self.criterion(output, target)
        return loss

    def forward(self, batch):
        X = to_cuda(torch.LongTensor(batch.src)).transpose(0, 1)
        enc_out = self.encoder(X, batch.src_len)
        if len(enc_out) == 2:
            output, hidden = enc_out[:2]
        elif len(enc_out) == 3:
            # SOPA
            output = enc_out[2]
            hidden = enc_out[1]
        #output, hidden = self.encoder(X, batch.src_len)[:2]
        if getattr(self.config, 'use_lstm_state', False):
            mlp_out = self.mlp(hidden[1][-1])
        else:
            idx = to_cuda(torch.LongTensor(
                [b-1 for b in batch.src_len]))
            brange = to_cuda(torch.LongTensor(np.arange(len(batch.src))))
            mlp_out = self.mlp(output[idx, brange])
        return mlp_out

    def run_train(self):
        train_data = self.train_data
        dev_data = self.dev_data
        result = self.result
        self.init_optimizers()
        saved = False
        for epoch in range(self.config.epochs):
            self.fix_encoder_if_necessary(epoch)
            self.train(True)
            train_loss, train_acc = self.run_epoch(train_data, do_train=True, result=result)
            result.train_loss.append(train_loss)
            result.train_acc.append(train_acc)
            self.train(False)
            dev_loss, dev_acc = self.run_epoch(dev_data, do_train=False)
            result.dev_loss.append(dev_loss)
            result.dev_acc.append(dev_acc)
            s = self.save_if_best(train_loss, dev_loss, epoch)
            saved = saved or s
            logging.info("Epoch {}, Train loss: {}, Train acc: {}, "
                         "Dev loss: {}, Dev acc: {}".format(
                             epoch+1,
                             round(train_loss, 4),
                             round(train_acc * 100, 2),
                             round(dev_loss, 4),
                             round(dev_acc * 100, 2),
                         ))
            if self.should_early_stop(epoch, result):
                logging.info("Early stopping.")
                break
            if epoch == 0:
                self.config.save()
            result.save(self.config.experiment_dir)
        if saved is False:
            self._save(epoch)

    def fix_encoder_if_necessary(self, epoch):
        if isinstance(self.config.train_encoder, bool):
            return
        if epoch == self.config.train_encoder:
            for param in self.encoder.parameters():
                param.requires_grad = False

    def __enter__(self):
        self.result = Result()
        self.result.start()
        self.result.node = platform.node()
        self.result.parameters = sum(
            p.nelement() for p in self.parameters() if p.requires_grad)
        if use_cuda:
            self.result.gpu = torch.cuda.get_device_name(
                torch.cuda.current_device())
        else:
            self.result.gpu = None
        self.result.train_size = len(self.train_data)
        self.result.dev_size = len(self.dev_data)
        self.config.save()
        return self

    def __exit__(self, *args):
        self.result.epochs_run = len(self.result.train_loss)
        self.config.save()
        self.result.end()
        self.result.save(self.config.experiment_dir)

    def _save(self, epoch):
        if self.config.overwrite_model is True:
            save_path = os.path.join(self.config.experiment_dir, "model")
        else:
            save_path = os.path.join(
                self.config.experiment_dir,
                "model.epoch_{}".format("{0:04d}".format(epoch)))
        logging.info("Saving model to {}".format(save_path))
        torch.save(
            {'encoder': self.encoder.state_dict(),
             'mlp': self.mlp.state_dict()},
            save_path)


class EmbeddingProber(Prober):
    def __init__(self, config, train_data, dev_data, embedding):
        BaseModel.__init__(self, config)
        self.config = Config.from_yaml(config)
        self.config.train_file = train_data
        self.config.dev_file = dev_data
        self.encoder = EmbeddingWrapper(train_data, dev_data, embedding)
        self.output_size = len(self.encoder.vocab_label)
        self.train_data = self.encoder.get_train_data_wrapper()
        self.dev_data = self.encoder.get_dev_data_wrapper()
        self.mlp = self.create_classifier()
        self.criterion = nn.CrossEntropyLoss()

    def create_classifier(self):
        enc_size = self.encoder.hidden_size
        return MLP(
            input_size=enc_size,
            layers=self.config.mlp_layers,
            nonlinearity=self.config.mlp_nonlinearity,
            output_size=self.output_size,
        )

    def forward(self, batch):
        X = to_cuda(torch.LongTensor(batch[0]))
        embedded = self.encoder(X)
        mlp_out = self.mlp(embedded)
        return mlp_out

    def compute_loss(self, target, output):
        target = to_cuda(torch.LongTensor(target[1])).view(-1)
        loss = self.criterion(output, target)
        return loss


def main():
    args = parse_args()
    if args.embedding:
        with EmbeddingProber(args.config, train_data=args.train_file,
                             dev_data=args.dev_file,
                             embedding=args.embedding) as prober:
            prober = to_cuda(prober)
            prober.run_train()
    elif args.encoder:
        with Prober(args.config, encoder=args.encoder,
                    train_data=args.train_file,
                    dev_data=args.dev_file) as prober:
            prober = to_cuda(prober)
            prober.run_train()


if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
UTF-8
Python
false
false
13,416
py
64
probe.py
45
0.575997
0.572568
0
368
35.453804
97
kikyou123/DCGAN
5,128,190,996,595
bd8bb7ca580012ea4e000d94908759bfb0f7c4aa
28eeb5db2629583e0e516cb2992efec172b03a36
/DCGAN_CelebA.py
01c47d1f2cbf4353a26f361ab2c1dfd5b0d81f86
[]
no_license
https://github.com/kikyou123/DCGAN
25113de82d8d8f4b76d0d8a9b0f2b2c09a936fe2
334b8b72a34bf31bffa753b3a0964b69dda4685b
refs/heads/master
2021-06-13T16:49:06.923924
2017-03-22T06:28:26
2017-03-22T06:28:26
85,309,959
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: utf-8

# In[1]:

import os
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from op import *
from utils import *
from load_hdf import *


# In[2]:

seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)


# In[3]:

dim = 64
batch_size = 100
z_dim = 100
ndfc = 1024
learning_rate = 0.0002
beta1 = 0.5
max_epoch = 25
ntrain = 202458

model_dir = "{}_{}_{}_{}".format('CelebA', batch_size, 64, 64)
data_dir = '/home/data/houruibing/CelebA'


# In[4]:

tr_data, tr_stream = faces(ntrain=ntrain, batch_size=batch_size, data_dir=data_dir)
tr_handle = tr_data.open()


# In[5]:

checkpoint_dir = 'checkpoint/CNN_GAN/CelebA'
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)


# In[6]:

sample_dir = 'generate_images/CNN_GAN/CelebA'
if not os.path.exists(sample_dir):
    os.makedirs(sample_dir)


# In[7]:

log_dir = 'log/CNN_GAN/CelebA'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)


# In[8]:

sample_z = np.random.uniform(-1, 1, [batch_size, z_dim])
sample_num = batch_size
tr_handle = tr_data.open()
sample_inputs, = tr_data.get_data(tr_handle, slice(0, batch_size))  # int 0-255
sample_inputs = transform(sample_inputs)


# In[9]:

# define D-network
def discriminator(image):
    """create a network that discriminates between images from a dataset and generated ones
    Args:
        image: a batch of real images [batch_size, height, width, channels]
    Returns:
        A tensor that represents the probability of the real image"""
    h1 = lrelu(conv2d(image, dim, name='d_h1_conv'))
    h2 = lrelu(batch_norm(conv2d(h1, dim * 2, name='d_h2_conv'), name='d_bn2'))
    h3 = lrelu(batch_norm(conv2d(h2, dim * 4, name='d_h3_conv'), name='d_bn3'))
    h4 = lrelu(batch_norm(conv2d(h3, dim * 8, name='d_h4_conv'), name='d_bn4'))
    h5 = linear(flatten(h4), 1, 'd_h5_lin')
    #h5 = linear(tf.reshape(h4, [batch_size, -1]), 1, 'd_h5_lin')
    #h5 = lrelu(batch_norm(linear(tf.reshape(h4, [batch_size, -1]), ndfc, 'd_h5_lin'), name = 'd_bn5'))
    #h6 = linear(h5, 1, 'd_h6_lin')
    return sigmoid(h5)


# In[10]:

def generator(z):
    """Create a network that generates images
    Args:
        z: input random noise of size [batch_size, dim_z]
    Returns:
        A deconvolutional network that generates images of size [batch_size, height, width, channel]"""
    h0 = relu(batch_norm(linear(z, dim * 8 * 4 * 4, 'g_h0_lin'), name='g_bn0'))
    h0 = tf.reshape(h0, [-1, 4, 4, dim * 8])
    h1 = relu(batch_norm(deconv2d(h0, [batch_size, 8, 8, dim * 4], name='g_h1'), name='g_bn1'))
    h2 = relu(batch_norm(deconv2d(h1, [batch_size, 16, 16, dim * 2], name='g_h2'), name='g_bn2'))
    h3 = relu(batch_norm(deconv2d(h2, [batch_size, 32, 32, dim], name='g_h3'), name='g_bn3'))
    h4 = deconv2d(h3, [batch_size, 64, 64, 3], name='g_h4')
    h4 = tf.nn.tanh(h4)
    return h4


# In[11]:

def train():
    with tf.variable_scope('Gen') as scope:
        z = tf.placeholder(tf.float32, shape=[None, z_dim])
        z_sum = tf.summary.histogram("z", z)
        G = generator(z)
        G_sum = tf.summary.image("G", G)

    with tf.variable_scope('Disc') as scope:
        x = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
        D1 = discriminator(x)
        scope.reuse_variables()
        D2 = discriminator(G)
        d1_sum = tf.summary.histogram("d1", D1)
        d2_sum = tf.summary.histogram("d2", D2)

    d_loss_real = tf.reduce_mean(-tf.log(D1))
    d_loss_fake = tf.reduce_mean(-tf.log(1 - D2))
    g_loss = tf.reduce_mean(-tf.log(D2))

    d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
    d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
    d_loss = d_loss_real + d_loss_fake
    g_loss_sum = tf.summary.scalar("g_loss", g_loss)
    d_loss_sum = tf.summary.scalar("d_loss", d_loss)

    d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
    g_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')

    d_optim = tf.train.AdamOptimizer(0.0002, beta1=beta1).minimize(d_loss, var_list=d_params)
    g_optim = tf.train.AdamOptimizer(0.001, beta1=beta1).minimize(g_loss, var_list=g_params)

    saver = tf.train.Saver()
    config = tf.ConfigProto()
    #config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)

    g_sum = tf.summary.merge([z_sum, d2_sum, G_sum, d_loss_fake_sum, g_loss_sum])
    d_sum = tf.summary.merge([z_sum, d1_sum, d_loss_real_sum, d_loss_sum])
    writer = tf.summary.FileWriter(log_dir, sess.graph)

    # if load(checkpoint_dir):
    #     print(" [*] Load SUCCESS")
    # else:
    #     print(" [!] Load failed...")

    counter = 1
    for epoch in range(max_epoch):
        batch_idx = ntrain // batch_size
        idx = -1  # each epoch iter time
        for imb, in tqdm(tr_stream.get_epoch_iterator(), total=ntrain / batch_size):
            idx += 1
            batch_z = np.random.uniform(-1, 1, [batch_size, z_dim]).astype(np.float32)
            images = transform(imb)

            # update D
            _, summary_str = sess.run([d_optim, d_sum], feed_dict={x: images, z: batch_z})
            writer.add_summary(summary_str, counter)

            # update G
            _, summary_str = sess.run([g_optim, g_sum], feed_dict={z: batch_z})
            writer.add_summary(summary_str, counter)

            # update G twice
            #_, summary_str = sess.run([g_optim, g_sum], feed_dict = {z: batch_z})
            #writer.add_summary(summary_str, counter)

            counter = counter + 1
            errD = sess.run(d_loss, {z: batch_z, x: images})
            errG = sess.run(g_loss, {z: batch_z})
            print("Epoch: [%2d] [%4d%4d] d_loss: %.8f, g_loss: %.8f" % (epoch, idx, batch_idx, errD, errG))

            # generate samples
            if counter % 100 == 0:
                samples, d1_loss, g1_loss = sess.run([G, d_loss, g_loss], feed_dict={z: sample_z, x: sample_inputs})
                samples = inverse_transform(samples)
                save_images(samples, [4, 8], './{}/train_{:02d}_{:04d}.png'.format(sample_dir, epoch, idx))
                print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d1_loss, g1_loss))

            # save parameters
            if counter % 500 == 0:
                save(checkpoint_dir, counter, saver, sess)
    sess.close()


# In[12]:

def save(checkpoint_dir, step, saver, sess):
    model_name = 'GAN_CNN.model'
    checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)


# In[13]:

def load(checkpoint_dir):
    print(" [*] Reading checkpoints...")
    checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
        print(" [*] Success to read {}".format(ckpt_name))
        return True
    else:
        print(" [*] Failed to find a checkpoint")
        return False


# In[14]:

def main(_):
    train()


if __name__ == '__main__':
    tf.app.run()
UTF-8
Python
false
false
7,887
py
7
DCGAN_CelebA.py
7
0.563712
0.535438
0
250
30.54
108
rkudache31/django
2,044,404,474,402
f295dd77f2144ca4186cb18223731900d4a9decc
5bc99197b793971c76a69ba78f2db194d47a999a
/homefood/food/migrations/0004_auto_20200517_1434.py
def7407af482f54ec3dcd8b980c021a008d2c5cc
[]
no_license
https://github.com/rkudache31/django
180abb329ec5b3d1f709616615a1ef7c4d23c08b
4fab3ba0a1bdf4e0dfd36c24cc66ab195dcb919b
refs/heads/master
2023-07-17T14:34:36.207475
2021-08-25T20:55:28
2021-08-25T20:55:28
259,901,385
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 3.0.3 on 2020-05-17 09:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('food', '0003_item_item_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='Item_desc',
            field=models.CharField(max_length=300),
        ),
    ]
UTF-8
Python
false
false
399
py
14
0004_auto_20200517_1434.py
7
0.551378
0.496241
0
18
20.166667
51
GuyCarver/raspi
10,668,698,780,839
5cbad709642e542f1b7c1cd52d7380055aa20e50
6ed530cc9fc7758c1df341c9d624e92539300c6a
/projects/sentrybot/body.py
9d33418dec75eff09371add799993c2c8bc7b9d7
[]
no_license
https://github.com/GuyCarver/raspi
aa87bff08969069074a98196574edaa27be8585c
30dc2b595274cdc5afe07767b584fbb2cfdfa7f1
refs/heads/master
2022-11-24T14:05:33.867306
2022-11-11T16:20:22
2022-11-11T16:20:22
109,533,023
0
0
null
false
2020-10-13T04:36:04
2017-11-04T21:53:07
2020-09-07T15:56:25
2020-10-13T04:36:02
83,228
0
0
1
C++
false
false
#!/usr/bin/env python3
#11/10/2018 11:10 AM

from quicrun import *
from atom import *

#------------------------------------------------------------------------
class part(object):
    '''abstract base for a body part. anglepart and speedpart derive from this
       to control the value as either a +/-90 deg angle or 0-100 speed.'''

    def __init__(self, aPCA, aIndex, aName):
        self._pca = aPCA
        self._index = aIndex
        self._name = aName
        self._value = -1.0
        self._currentValue = 0.0  #The actual value that may vary from value if rate > 0.
        self._rate = 0.0  #Rate of interpolation between _value and _target value.
                          #This is in units/second. IE: 180 is 180 degrees a second.
        self._minmax = self._defminmax
        self._center = 0.0
        self.value = 0.0  #Set real value and write servo.
        self.scale = 1.0  #Scale value

    @property
    def index( self ):
        return self._index

    @property
    def name( self ):
        return self._name

    @property
    def center( self ):
        return self._center

    @center.setter
    def center( self, aValue ):
        self._center = aValue

    @property
    def value( self ):
        return self._value

    @property
    def currentValue( self ):
        return self._currentValue

    @value.setter
    def value( self, aValue ):
        aValue = min(max(aValue, self._minmax[0] - self._center), self._minmax[1] - self._center)
        if aValue != self._value:
            self._value = aValue
            if self._rate <= 0.0:
                self._currentValue = aValue
                self.setservo()

    @property
    def minmax( self ):
        return self._minmax

    @minmax.setter
    def minmax( self, aValue ):
        #If tuple just use directly without checking legitimate ranges.
        if isinstance(aValue, tuple) or isinstance(aValue, list):
            self._minmax = aValue
        else:
            #otherwise it's considered a single # we use for both min/max.
            self._minmax = (max(-aValue, self._defminmax[0]), aValue)

    @property
    def minmaxforjson( self ):
        '''If min == -max then just return max (single value) otherwise return min/max tuple.'''
        if self._minmax[0] == -self._minmax[1]:
            return self._minmax[1]
        return self._minmax

    @property
    def rate( self ):
        return self._rate

    @rate.setter
    def rate( self, aValue ):
        self._rate = aValue

    def off( self ):
        '''Turn the servo off.'''
        self._pca.off(self._index)

    def update( self, aDelta ):
        '''Update speed towards target given delta time in seconds.'''
        if self._rate > 0.0 and aDelta > 0.0:
            diff = self._value - self._currentValue
            if abs(diff) > 0.01:
                if diff < 0.0:
                    mm = max
                    d = -aDelta
                else:
                    mm = min
                    d = aDelta
                diff = mm(self._rate * d, diff)
                newvalue = self._currentValue + diff
                self._currentValue = mm(newvalue, self._value)
                self.setservo()

#------------------------------------------------------------------------
class anglepart(part):
    _defminmax = (-100.0, 100.0)
    '''Body part that uses an angle value from -90 to 90.'''

    def __init__( self, aPCA, aIndex, aName ):
        super(anglepart, self).__init__(aPCA, aIndex, aName)

    def setservo( self ):
        '''Set the servo to current angle.'''
        self._pca.setangle(self._index, int(self._currentValue + self._center))

#------------------------------------------------------------------------
class speedpart(part):
    _defminmax = (0.0, 100.0)
    '''Body part that uses a speed value from 0-100.'''

    def __init__( self, aPCA, aIndex, aName ):
        super(speedpart, self).__init__(aPCA, aIndex, aName)

    def setservo( self ):
        '''Set the servo to current speed.'''
        v = int(self._currentValue + self._center)
#    print('setting', v)
        self._pca.set(self._index, v)

#------------------------------------------------------------------------
_ANGLE, _SPEED, _MOTOR, _ATOM = range(4)

_CONSTRUCTORS = {_ANGLE : anglepart, _SPEED : speedpart, _MOTOR : quicrun, _ATOM : atom}

#Default data used to initialize the parts. This will be
# modified by the json data.
#Name, Servo #, Type, Rate, Trim, Range (min, max) or minmax.
_defaultdata = (
    ("TORSO", 8, _ANGLE, 0.0, 0.0, 90.0),
    ("HEAD_H", 0, _ANGLE, 0.0, 0.0, 90.0),
    ("HEAD_V", 1, _ANGLE, 0.0, 0.0, 30.0),
    ("LARM_H", 2, _ANGLE, 0.0, 0.0, 90.0),
    ("LARM_V", 3, _ANGLE, 0.0, 0.0, 90.0),
    ("RARM_H", 5, _ANGLE, 0.0, 0.0, 90.0),
    ("RARM_V", 6, _ANGLE, 0.0, 0.0, 90.0),
    ("LLEG", 9, _MOTOR, 20.0, 0.0, 20.0),
    ("RLEG", 10, _MOTOR, 20.0, 0.0, 20.0),
    ("GUN", 11, _MOTOR, 25.0, 0.0, (0.0, 100.0)),
    ("MISSILES", 12, _ANGLE, 10.0, 0.0, 10.0),
    ("SMOKE", 15, _MOTOR, 1000.0, 0.0, (50.0, 100.0)),
)

_numparts = len(_defaultdata)

_TORSO, _HEAD_H, _HEAD_V, \
_LARM_H, _LARM_V, \
_RARM_H, _RARM_V, \
_LLEG, _RLEG, \
_GUN, _MISSILES, _SMOKE = range(_numparts)

_parts = [None] * _numparts
_initdata = None


def partindex( aName ):
    for i, v in enumerate(_defaultdata):
        if v[0] == aName:
            return i
    raise Exception("Part {} not found.".format(aName))


def getpart( aIndex ):
    return _parts[aIndex]


def parttype( aPart ):
    '''Get part type constructor enum from part type.'''
    tp = type(aPart)
    for t in _CONSTRUCTORS.items():
        if t[1] == tp:
            return t[0]
    raise Exception('Unknown part type {}.', tp)


def saveparts( aData ):
    '''Save part data for each part into the given json dictionary.'''
    def makeentry(i, p):
        return (_defaultdata[i][0], p.index, parttype(p), p.rate, p.center, p.minmaxforjson)

    data = [makeentry(i, p) for i, p in enumerate(_parts)]
    aData['parts'] = data


def loadparts( aData ):
    '''Load data from given json dictionary.'''
    global _initdata
    try:
        _initdata = aData['parts']
    except:
        pass


def initparts( aPCA ):
    '''Initialize the parts from the _initdata dictionary. If that is None, use default data.'''
    global _initdata
    if _initdata == None:
        _initdata = _defaultdata

    #Create part for given part data.
    for pdata in _initdata:
        name, index, typ, rate, center, minmax = pdata
        part = _CONSTRUCTORS[typ](aPCA, index, name)
        part.rate = rate
        part.minmax = minmax
        part.center = center
        part.value = center
        pi = partindex(name)
        _parts[pi] = part


def setmotordata( aIndex, aRate, aMinMax ):
    '''Set the motor data on a part.'''
    _parts[aIndex].rate = aRate
    _parts[aIndex].minmax = aMinMax


def updateparts( aDelta ):
    '''Iterate through parts and call update.'''
    for p in _parts:
        p.update(aDelta)


def off( aIndex = -1 ):
    '''Turn given part off. If no part index given, turn all of them off.'''
    if aIndex >= 0:
        if _parts[aIndex]:
            _parts[aIndex].off()
    else:
        for p in _parts:
            if p:
                p.off()


from pca9685 import *

def test( ):
    p = pca9865(100)
    sleep(2.0)
    a = anglepart(p, 5, 'test')
    a.minmax = 90.0
    a.rate = 90.0
    mn, mx = a.minmax

    def waitforit( part, aDir, aTarget ):
        a.value = aTarget
        while(part.currentValue != aTarget):
            print(part.currentValue, aTarget)
            sleep(0.01)
            part.update(0.01 * aDir)

    waitforit(a, 1.0, mx)
    waitforit(a, 1.0, mn)
    a.off()
    print('done')

#------------------------------------------------------------------------
if __name__ == '__main__':  #Run tests.
    test()
UTF-8
Python
false
false
7,356
py
156
body.py
120
0.566476
0.535481
0
272
26.047794
107
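The `part.update` method in the body.py record above rate-limits servo motion by stepping `_currentValue` toward `_value` at `_rate` units per second. A minimal standalone sketch of the same idea, under the assumption that a rate of zero means "snap immediately" (the `step_toward` helper is illustrative, not part of the module):

def step_toward(current, target, rate, dt):
    """Move current toward target by at most rate * dt, without overshoot."""
    if rate <= 0.0 or dt <= 0.0:
        return target  # no rate limiting configured, jump straight to the target
    max_step = rate * dt
    diff = target - current
    if abs(diff) <= max_step:
        return target
    return current + (max_step if diff > 0 else -max_step)

# e.g. a servo limited to 90 deg/s, updated at 100 Hz:
angle = 0.0
for _ in range(10):
    angle = step_toward(angle, 45.0, rate=90.0, dt=0.01)
print(angle)  # ~9.0 degrees after 0.1 s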
yuliyabirman/pynd-pubs
7,550,552,538,196
34d0234ec6184516c58f474cfa89514a3adb9ee6
fd7b81162c68cd1b78fedc1caec829fbebe3dc38
/src/calc_fitness.py
f3f777a73038dfd3986083bd9f6c7f1b106a72f3
[ "Apache-2.0" ]
permissive
https://github.com/yuliyabirman/pynd-pubs
938972a62934c1771c081a9dba72b1f513a964c3
61ee54f152343689169e1e6664edea94beed37cc
refs/heads/master
2020-12-14T08:53:07.440864
2015-11-11T17:16:34
2015-11-11T17:16:34
45,495,227
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys import pickle import numpy from scipy import stats import os homedir = os.path.expanduser('~') def calc_ratio(frequencies): # find mutant positions # find WT positions # map residue positions to mutants and WT WT_frequency = None mutant_frequencies = {} for key, frequency in frequencies.items(): codon = key[1] if codon == 'WT': assert WT_frequency is None WT_frequency = frequency else: mutant_frequencies[key] = frequency # calculate ratio #ratio = mutant_frequency / WT_frequency # calculate the ratio of each single-codon mutation relative to the wild-type sequence for each time point on a log2 scale return {key: numpy.log2(value / float(WT_frequency)) for key, value in mutant_frequencies.items()} def load_ratios(pickle_file): with open(pickle_file, 'rb') as f: freqs = pickle.load(f) return calc_ratio(freqs) def calc_regression(times, ratio_list): # get list of mutants to iterate over # this will be the union of the mutants across all of the time points mutants = set() for ratios in ratio_list: for mutant in ratios: mutants.add(mutant) slopes = {} # now calculate a regression for each mutant for mutant in mutants: # get ratios with 0.0 for mutants that are not found new_ratios = [] for ratios in ratio_list: if mutant in ratios: new_ratios.append(ratios[mutant]) else: new_ratios.append(0) slope, intercept, r_value, p_value, std_err = stats.linregress(times, new_ratios) slopes[mutant] = slope return slopes # TODO: Stop hardcoding def main(args): test_name = args[1] # Day 1, timepoints T0, T1, T2 # for each residue position, find log2 ratio for each time point # load input files containing the residue position, codon, and frequency ratios_0 = load_ratios(homedir + "/populations/pop0_" + test_name + "_0.pkl") ratios_1 = load_ratios(homedir + "/populations/pop1_" + test_name + "_1.pkl") ratios_2 = load_ratios(homedir + "/populations/pop2_" + test_name + "_2.pkl") ratios_3 = load_ratios(homedir + "/populations/pop3_" + test_name + "_3.pkl") ratios_4 = load_ratios(homedir + "/populations/pop4_" + test_name + "_4.pkl") ratios_5 = load_ratios(homedir + "/populations/pop5_" + test_name + "_5.pkl") times_day_1 = [0, 2.43, 4.11] times_day_2 = [0, 2.02, 4.16] fitness_day_1 = calc_regression(times_day_1, [ratios_0, ratios_1, ratios_2]) fitness_day_2 = calc_regression(times_day_2, [ratios_3, ratios_4, ratios_5]) pickle.dump(fitness_day_1, open(homedir + '/fitness/' + test_name + '_fitness_day_1', 'wb')) pickle.dump(fitness_day_2, open(homedir + '/fitness/' + test_name + '_fitness_day_2', 'wb')) # determine selection coefficient, s, for each mutation # by finding the slope of this ratio to time in WT generations if __name__ == '__main__': main(sys.argv)
UTF-8
Python
false
false
2,931
py
22
calc_fitness.py
19
0.67349
0.653019
0
93
30.516129
124
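The core of `calc_regression` in the calc_fitness.py record above is fitting a line through log2 mutant/WT frequency ratios over time and keeping the slope as the fitness estimate. A self-contained sketch of that step with toy numbers (not real data from the script):

import numpy as np
from scipy import stats

times = [0, 2.43, 4.11]                      # time points in WT generations, as in the script
mutant_freq = np.array([0.010, 0.006, 0.004])
wt_freq = np.array([0.10, 0.12, 0.15])

ratios = np.log2(mutant_freq / wt_freq)      # log2 ratio at each time point
slope, intercept, r, p, stderr = stats.linregress(times, ratios)
print(round(slope, 3))                       # negative slope -> deleterious mutation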
MrLinNing/FoolsGold
1,511,828,500,397
2a6e62bbbb27a12b552842a5483993f7f29172bc
501f11db8449149dfe84474b23b56433859bfb49
/ccs19-eval/vggface-iid-heatmap/iid_heatmap.py
f833180d6ba585bce3f0d99bbb870187e7c6e195
[]
no_license
https://github.com/MrLinNing/FoolsGold
eff28c036591c06dacb0d59f00e7999fff1c7b54
db25670a0502d879ffda018992280248a7e1b2b8
refs/heads/master
2021-01-05T16:21:05.216670
2019-05-16T02:49:10
2019-05-16T02:49:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import seaborn as sns import matplotlib.pyplot as plt import pandas as pd def plot(): props = [0, 25, 50, 75, 100] df = pd.read_csv("squeeze-heatmap.csv", header=None) toplot = df.values sns.set(font_scale=1.4) ax = sns.heatmap(toplot, linewidth=0.5, annot=True, annot_kws={"size": 14}, fmt=".3f", center=0, cmap='Greys', xticklabels=props, yticklabels=props, vmin=0, vmax=1 ) plt.xlabel("Honest Shared Data Proportion", fontsize=18) plt.ylabel("Sybil Shared Data Proportion", fontsize=18) plt.tight_layout(pad=0.1) plt.savefig("vgg_iid_heatmap.pdf") plt.show() if __name__ == "__main__": plot()
UTF-8
Python
false
false
809
py
100
iid_heatmap.py
12
0.578492
0.543881
0
40
19.225
60
arkster/gmusic
2,216,203,134,664
7d10d75a38d594b62b57ffc49327e9b37ed3964e
43d12e8cea8d65d499cd674e0aec7993feac7e2d
/gmusic/media_resources.py
fd66f7cae44c15037891fb3fc26f11d12ee9a305
[]
no_license
https://github.com/arkster/gmusic
1ad082cdffc53f5e4b8be9a02dcd1a69d87eeadc
aa817bd14a8f9995e846784eeeecc9735b3545b4
refs/heads/master
2021-08-18T02:46:38.452230
2020-03-31T05:03:24
2020-03-31T05:03:24
146,410,728
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ gmusic.media_resources ~~~~~~~~~~~~~~~~~~~~~~ This module queries various radio stations to gather recently played songs on their playlists """ from datetime import datetime, timedelta from random import randint import asyncio import aiohttp import requests from bs4 import BeautifulSoup import box class MediaResources(object): """ Main class that queries for songs """ def __init__(self, timestamp=None, steps=None): if not steps: self.steps = 50000 else: self.steps = steps if timestamp: self.timestamp = timestamp else: self.timestamp = None self.music_list = [] self.radio_stations = { 'cbs_stations': { 'params': [['action', 'playlist'], ['type', 'json'], ['before']] , 'headers': { 'DNT': '1', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36', 'Accept': '*/*', 'Referer': 'http://{}.cbslocal.com/playlist', 'X-Requested-With': 'XMLHttpRequest', 'Connection': 'keep-alive', }, 'urls': { 'wxrt': 'http://wxrt.cbslocal.com/playlist/', 'x1075lasvegas': 'http://x1075.cbslocal.com/playlist', 'kroq': 'http://www.roq.com/playlist/', 'live105': 'http://www.live.com/playlist/', }, 'interval': self.steps * 4, }, 'tunegenie': { 'params': [['since', '2017-08-08T17:00:00-05:00'], ['until', '2017-08-08T18:59:59-05:00']], 'headers': { 'DNT': '1', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', 'Accept': '*/*', 'Referer': 'http://{}.tunegenie.com/onair/', 'X-Requested-With': 'XMLHttpRequest', 'Connection': 'keep-alive', }, 'urls': { 'wwyy': 'http://wwyy.tunegenie.com/api/v1/brand/nowplaying/', 'wkqx': 'http://wkqx.tunegenie.com/api/v1/brand/nowplaying/' }, 'interval': self.steps * 4, }, 'iheart': { 'stations': ['star1019', 'dc101'], # 'stations': ['dc101'], 'data': [['nextPageToken', 'token'], ['template', 'playlist'], ['offset', '0'], ['limit', '150000'], ], 'headers': { 'DNT': '1', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', }, 'next_headers': { 'origin': 'https://{}.iheart.com', 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'en-US,en;q=0.8', 'x-requested-with': 'XMLHttpRequest', 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', 'accept': 'text/html, */*; q=0.01', 'referer': 'https://{}.iheart.com/gmusic/recently-played/', 'dnt': '1', }, 'url': 'https://{}.iheart.com/gmusic/recently-played/', 'next_url': 'https://{}.iheart.com/api/gmusic/load_more/' } } def get_iso_time(self, interval): now = datetime.now() for i in range(0, self.steps): t = now.replace(microsecond=0) - timedelta(hours=interval, minutes=randint(0, 9)) yield t.isoformat() now = t def wrapper(self, func, interval): """ Coroutine wrapper""" yield from func(interval) def parse_cbs_station_data(self, data): """ Adds songs to list for cbs stations """ for each_data in data: box_data = box.Box(each_data) for each_song in box_data.data.recentEvents: 
self.music_list.append([each_song.artist, each_song.title]) def parse_tunegenie_data(self, data): """ Adds songs to list for tunegenie stations""" for each in data: mbox = box.Box(each) for eachlist in mbox.response: if eachlist.artist.startswith('Weekdays,') or eachlist.artist.startswith( "The Valley's") or eachlist.artist.startswith("Sundays,"): continue self.music_list.append([eachlist.artist, eachlist.song]) def run_synchronous_process(self): """ Routine to scrape recently played song title/artist info in synchronous mode""" box_radio_stations = box.Box(self.radio_stations) for station in box_radio_stations.iheart.stations: content = requests.get(box_radio_stations.iheart.url.format(station), headers=box_radio_stations.iheart.headers).content soup = BeautifulSoup(content, 'html.parser', from_encoding='utf-8') for songinfo in [each.attrs['alt'] for each in soup.find_all() if 'alt' in each.attrs]: songdetails = songinfo.split(' - ')[::-1] if songdetails[0].startswith("Hawaii's Alternative") or songdetails[0].startswith('STATION_LOGO') or \ songdetails[0].startswith('{{') or songdetails[0].startswith('iHeartRadio') or songdetails[ 0].startswith('Sundays,'): continue self.music_list.append(songdetails) token = \ [each.attrs['data-nextpagetoken'] for each in soup.find_all() if 'data-nextpagetoken' in each.attrs][0] data = box_radio_stations.iheart.data interval = box_radio_stations.tunegenie.interval data[0][1] = token data[3][1] = interval url = box_radio_stations.iheart.next_url.format(station) box_radio_stations.iheart.next_headers.origin = box_radio_stations.iheart.next_headers.origin.format( station) iheart_next_content = requests.post(url, headers=box_radio_stations.iheart.next_headers, data=data).content next_soup = BeautifulSoup(iheart_next_content, 'html.parser', from_encoding='utf-8') for songinfo in [each.attrs['alt'] for each in next_soup.find_all() if 'alt' in each.attrs]: songdetails = songinfo.split(' - ')[::-1] if songdetails[0].startswith("Hawaii's Alternative") or songdetails[0].startswith('STATION_LOGO') or \ songdetails[0].startswith('{{') or songdetails[0].startswith('iHeartRadio') or songdetails[ 0].startswith('Sundays,'): continue self.music_list.append(songdetails) async def fetch(self, headers, url, client, params=None, station=None, until=None, since=None): """ Async fetch method to retrieve song data from urls :param headers: dict connection header :param url: string url :param client: Async client session object :param params: dict connection parameters :param station: string station name :param until: datetime time stamp :param since: datetime time stamp :returns coroutine json object """ if station == 'tunegenie': if until: params[1][1] = until else: until = params[0][1] params[0][1], params[1][1] = since, until elif station == 'cbs_stations': params[2][1] = since async with client.get(url, params=params, headers=headers) as resp: assert resp.status == 200 return await resp.json() async def run_loop(self, loop, headers, url, params=None, station=None, interval=None): """ Async run loop method to fetch data :param loop: Asyncio event loop :param headers: dict connection header :param url: string url :param params: dict connection parameters :param station: string station name :param interval: dict number of iterations to loop through :returns coroutine asyncio response """ until = None wrap = None since = None if station == 'cbs_stations': if len(params[-1]) == 1: params[-1].append(self.timestamp) wrap = self.wrapper(self.get_iso_time, interval) elif station == 
'tunegenie': wrap = self.wrapper(self.get_iso_time, interval) tasks = [] async with aiohttp.ClientSession(loop=loop) as client: for i in range(self.steps): if station == 'cbs_stations': since = next(wrap) elif station == 'tunegenie': if self.timestamp is None: until = datetime.now().replace(microsecond=0).isoformat() since = next(wrap) task = asyncio.ensure_future(self.fetch(headers, url, client, params, station, until=until if self.timestamp is None else None, since=since)) tasks.append(task) self.timestamp = until responses = await asyncio.gather(*tasks) return responses
UTF-8
Python
false
false
10,602
py
6
media_resources.py
5
0.518676
0.495284
0
246
42.097561
142
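The `run_loop`/`fetch` pair in the media_resources.py record above fans out many HTTP requests with `asyncio.ensure_future` and collects the results with `asyncio.gather`. A stripped-down sketch of that pattern (the URLs are placeholders):

import asyncio
import aiohttp

async def fetch(client, url):
    # one request; the shared session handles connection pooling
    async with client.get(url) as resp:
        resp.raise_for_status()
        return await resp.text()

async def run(urls):
    async with aiohttp.ClientSession() as client:
        tasks = [asyncio.ensure_future(fetch(client, u)) for u in urls]
        return await asyncio.gather(*tasks)   # all requests in flight concurrently

# pages = asyncio.get_event_loop().run_until_complete(
#     run(["https://example.com/a", "https://example.com/b"]))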
TJCrisostomo/Automation
816,043,817,422
a8fde6a48ba58f76e88e7e713cab2849bbd3baf8
81761e416c6164d13c2da6d793d282ce8bc1e3c3
/myconfig.py
947e7947c397bf27d8d9254c3f30d1278c44dbc4
[]
no_license
https://github.com/TJCrisostomo/Automation
46c9618d825972dfcf0284c61c040df9c86a2465
3743b6b8ba34b336849bc5fc89ece96d909ea91e
refs/heads/master
2021-01-19T09:48:47.265655
2017-02-16T08:27:07
2017-02-16T08:27:07
82,141,378
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# myconfig.py: var_username = '0468827174' var_password = 'theHoff34' var_url = 'https://accounts.amaysim.com.au'
UTF-8
Python
false
false
115
py
3
myconfig.py
2
0.713043
0.608696
0
5
22
43
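Checked-in credentials like the ones in the myconfig.py record above are usually read from the environment instead. A minimal alternative sketch (the environment variable names are illustrative assumptions):

import os

var_username = os.environ.get("AMAYSIM_USERNAME", "")
var_password = os.environ.get("AMAYSIM_PASSWORD", "")
var_url = os.environ.get("AMAYSIM_URL", "https://accounts.amaysim.com.au")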
larry1101/Tesselladou
1,846,835,952,728
4041d518e99f306d9113b45b69e9b335a745169e
400d2d336d9e3d86ad010e60e1994c786ff4138b
/tessellat.py
258fee8e35f1523159e77d7baf7b17592ad3e4f1
[]
no_license
https://github.com/larry1101/Tesselladou
505cc45a70810a44bb55babb915fd4a5a1974ffa
516a7dfcfbeb8748ceb1e17e00314350f27ec792
refs/heads/master
2021-05-05T23:25:42.019447
2018-01-09T07:07:08
2018-01-09T07:07:08
116,675,742
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import random import svgwrite from svgwrite.container import Defs from svgwrite.gradients import LinearGradient from svgwrite.shapes import Polygon fosu = ['#F0FFFD', '#97FBE7', '#53F0C9', '#33BD9B', '#2BA473', '#41D6BF', '#39CDDD', '#C1FDDE', '#42FF80', '#30C965', '#26C168', '#31D9B3', ] shinnsha = ['#FF0202', '#FF3C3C', '#F2655E', '#FA8B8B', '#F9B0AC', '#FCDCDA', '#FB0F26', '#FD3145', '#FC495B', '#FD6C7B', '#F84040', '#FF9EA6', '#AD0310', '#CF0312', '#B41612'] anntaku = ['#C0C0C0', '#C8CBDE', '#DBE3E2', '#D1D7CC', '#5E6377', '#DDDDDD', '#C9CDE0', '#CBD3D2', '#C1C7BC', '#7E8397', '#FFFFFF'] diamond = ['#FF0000', '#FBFB3C', '#13EC18', '#0E02FE', '#6803C2', '#DDDDDD', '#C9CDE0', '#CBD3D2', '#C1C7BC', '#7E8397', '#FFFFFF'] tiger = ['#F1DD25', '#F5E86B', '#F9EF9B', '#FDF9D2', '#F4E451', '#F2B026', '#F3BA43', '#F4C560', '#F7CF7D', '#FBB779', '#C05F05', '#C59A05', '#AE561C', '#DC6C07', '#B32B09'] sakura = ['#F86381', '#FD6A88', '#FC6382', '#FD718C', '#FD869D', '#FD97AB', '#FEB1C0', '#FEDEE4', '#FEDEDE', '#FEC9C9', '#F73E63', '#D2022C', '#CB032C', '#F85C6C', '#FCCCD1'] colors = {'fosu': fosu, 'shinnsha': shinnsha, 'anntaku': anntaku, 'diamond': diamond,'tiger':tiger,'sakura':sakura} using_color = 'fosu' dwg = svgwrite.Drawing('%s.svg' % using_color) patternize = True pattern_labels_cnt = 2 pattern_file_name = '%s.bmp'%using_color # pattern_file_name = 'doudou.bmp' if patternize: from skimage import io img = io.imread(pattern_file_name) width = img.shape[1] height = img.shape[0] step_min = 11 step_max = 23 triangle_leftest = 20 else: width = 1000 height = 600 step_min = 17 step_max = 43 triangle_leftest = 13 uni_left = True have_gradient = False gradient_cnt = 3 center_y_rand = True if patternize: import math def get_rgb(rgb_str): return int(rgb_str[1:3], 16), int(rgb_str[3:5], 16), int(rgb_str[5:], 16) def hsv2rgb(hsv): h = float(hsv[0]) s = float(hsv[1]) v = float(hsv[2]) h60 = h / 60.0 h60f = math.floor(h60) hi = int(h60f) % 6 f = h60 - h60f p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) r, g, b = 0, 0, 0 if hi == 0: r, g, b = v, t, p elif hi == 1: r, g, b = q, v, p elif hi == 2: r, g, b = p, v, t elif hi == 3: r, g, b = p, q, v elif hi == 4: r, g, b = t, p, v elif hi == 5: r, g, b = v, p, q r, g, b = int(r * 255), int(g * 255), int(b * 255) return r, g, b def rgb2hsv(rgb): if isinstance(rgb, tuple): r = rgb[0] g = rgb[1] b = rgb[2] else: raise Exception('Need rgb tuple') r, g, b = r / 255.0, g / 255.0, b / 255.0 mx = max(r, g, b) mn = min(r, g, b) df = mx - mn if mx == mn: h = 0 elif mx == r: h = (60 * ((g - b) / df) + 360) % 360 elif mx == g: h = (60 * ((b - r) / df) + 120) % 360 elif mx == b: h = (60 * ((r - g) / df) + 240) % 360 else: raise Exception('rgb2hsv error') if mx == 0: s = 0 else: s = df / mx v = mx return h, s, v def divide_colors(): for colors_series in colors: v_tmp = [] for color in colors[colors_series]: v_tmp.append(rgb2hsv(get_rgb(color))[2]) v_max = max(v_tmp) v_min = min(v_tmp) v_d = (v_max - v_min) / pattern_labels_cnt color_series_tmp = {} for label in range(pattern_labels_cnt): color_series_tmp[label] = [] for v_index in range(len(v_tmp)): try: color_series_tmp[int((v_tmp[v_index] - v_min) / v_d)].append(colors[colors_series][v_index]) except KeyError: color_series_tmp[int((v_tmp[v_index] - v_min) / v_d) - 1].append(colors[colors_series][v_index]) colors[colors_series] = color_series_tmp divide_colors() def get_color(points): if patternize: x_sum = 0 y_sum = 0 for point in points: x_sum += point[0] y_sum += point[1] x_avg = int(x_sum / len(points)) 
y_avg = int(y_sum / len(points)) img_col_ref = img[y_avg][x_avg] hsv = rgb2hsv((img_col_ref[0], img_col_ref[1], img_col_ref[2])) label = int(hsv[2] / (1 / pattern_labels_cnt)) try: return random.sample(colors[using_color][label], 1)[0] except KeyError: return random.sample(colors[using_color][label - 1], 1)[0] else: return random.sample(colors[using_color], 1)[0] if have_gradient: defs = Defs(id='gradients') colors_gradient = [] gradient_direction = [(1, 1), (0, 1), (1, 0)] for gradient_index in range(gradient_cnt): l_g = LinearGradient((0, 0), random.choice(gradient_direction), id='gradient_%d' % gradient_index) l_g.add_colors(random.sample(colors[using_color], 2)) defs.add(l_g) colors_gradient.append('url(#gradient_%d)' % gradient_index) colors[using_color] += colors_gradient dwg.add(defs) # start x_p0 = [] for i in range(triangle_leftest + 1): x_p0.append(0) if uni_left: y_p0 = [] for i in range(triangle_leftest + 1): y_p0.append(int(height / triangle_leftest * i)) else: y_p0 = random.sample(range(1, height), triangle_leftest - 1) y_p0.sort() y_p0.insert(0, 0) y_p0.append(height) p0 = list(zip(x_p0, y_p0)) while True: x_p0_max = max(x_p0) if width - x_p0_max > step_max: x_p2 = [] for i in range(triangle_leftest): x_p2.append(random.randint(step_min, step_max)) for i in range(len(x_p2)): x_p2[i] += x_p0_max y_p2 = [] for i in range(1, len(y_p0)): if center_y_rand: d = y_p0[i] - y_p0[i - 1] y_p2.append(random.randint(y_p0[i - 1] + int(0.45 * d), y_p0[i - 1] + int(0.55 * d))) else: y_p2.append(random.randint(y_p0[i - 1], y_p0[i] - 1)) p2 = list(zip(x_p2, y_p2)) for i in range(len(p2)): dwg.add(Polygon([p0[i], p2[i], p0[i + 1]], fill=get_color([p0[i], p2[i], p0[i + 1]]))) for i in range(len(p2) - 1): dwg.add(Polygon([p2[i], p0[i + 1], p2[i + 1]], fill=get_color([p2[i], p0[i + 1], p2[i + 1]]))) x_p2_max = max(x_p2) if width - x_p2_max > step_max: x_p1 = [] for i in range(triangle_leftest + 1): x_p1.append(random.randint(step_min, step_max)) for i in range(len(x_p1)): x_p1[i] += x_p2_max y_p1 = [] for i in range(1, len(y_p2)): if center_y_rand: d = y_p2[i] - y_p2[i - 1] y_p1.append(random.randint(y_p2[i - 1] + int(0.45 * d), y_p2[i - 1] + int(0.55 * d))) else: y_p1.append(random.randint(y_p2[i - 1], y_p2[i] - 1)) y_p1.insert(0, 0) y_p1.append(height) p1 = list(zip(x_p1, y_p1)) for i in range(len(p2)): dwg.add(Polygon([p1[i], p2[i], p1[i + 1]], fill=get_color([p1[i], p2[i], p1[i + 1]]))) for i in range(len(p2) - 1): dwg.add(Polygon([p2[i], p1[i + 1], p2[i + 1]], fill=get_color([p2[i], p1[i + 1], p2[i + 1]]))) dwg.add(Polygon([p0[0], p1[0], p2[0]], fill=get_color([p0[0], p1[0], p2[0]]))) dwg.add(Polygon([p0[-1], p1[-1], p2[-1]], fill=get_color([p0[-1], p1[-1], p2[-1]]))) else: x_p1 = [] for i in range(triangle_leftest + 1): x_p1.append(width) y_p1 = [] for i in range(1, len(y_p2)): if center_y_rand: d = y_p2[i] - y_p2[i - 1] y_p1.append(random.randint(y_p2[i - 1] + int(0.45 * d), y_p2[i - 1] + int(0.55 * d))) else: y_p1.append(random.randint(y_p2[i - 1], y_p2[i] - 1)) y_p1.insert(0, 0) y_p1.append(height) p1 = list(zip(x_p1, y_p1)) for i in range(len(p2)): dwg.add(Polygon([p1[i], p2[i], p1[i + 1]], fill=get_color([p1[i], p2[i], p1[i + 1]]))) for i in range(len(p2) - 1): dwg.add(Polygon([p2[i], p1[i + 1], p2[i + 1]], fill=get_color([p2[i], p1[i + 1], p2[i + 1]]))) dwg.add(Polygon([p0[0], p1[0], p2[0]], fill=get_color([p0[0], p1[0], p2[0]]))) dwg.add(Polygon([p0[-1], p1[-1], p2[-1]], fill=get_color([p0[-1], p1[-1], p2[-1]]))) break else: x_p2 = [] for i in range(triangle_leftest): 
x_p2.append(width) y_p2 = [] for i in range(1, len(y_p0)): if center_y_rand: d = y_p0[i] - y_p0[i - 1] y_p2.append(random.randint(y_p0[i - 1] + int(0.45 * d), y_p0[i - 1] + int(0.55 * d))) else: y_p2.append(random.randint(y_p0[i - 1], y_p0[i] - 1)) p2 = list(zip(x_p2, y_p2)) for i in range(len(p2)): dwg.add(Polygon([p0[i], p2[i], p0[i + 1]], fill=get_color([p0[i], p2[i], p0[i + 1]]))) for i in range(len(p2) - 1): dwg.add(Polygon([p2[i], p0[i + 1], p2[i + 1]], fill=get_color([p2[i], p0[i + 1], p2[i + 1]]))) dwg.add(Polygon([p0[0], (width, 0), p2[0]], fill=get_color([p0[0], (width, 0), p2[0]]))) dwg.add(Polygon([p0[-1], (width, height), p2[-1]], fill=get_color([p0[-1], (width, height), p2[-1]]))) break x_p0 = x_p1.copy() y_p0 = y_p1.copy() p0 = list(zip(x_p0, y_p0)) dwg.save()
UTF-8
Python
false
false
10,060
py
7
tessellat.py
2
0.46829
0.400795
0
302
32.311258
116
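The hand-rolled `rgb2hsv`/`hsv2rgb` in the tessellat.py record above can be cross-checked against the standard library's `colorsys`, which implements the same HSV model but keeps hue in 0..1 rather than degrees:

import colorsys

r, g, b = 0xF8, 0x63, 0x81           # '#F86381' from the sakura palette above
h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
print(h * 360.0, s, v)               # hue in degrees, comparable to rgb2hsv above
print([round(c * 255) for c in colorsys.hsv_to_rgb(h, s, v)])  # -> [248, 99, 129]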
brianlee389/twilioadapter
3,796,751,102,717
c48c0ee931fe8f674910e40ee60c7f2fd2af8b28
ecfcfa6c3df973f04741e2e12eacccf3bb6ecb21
/main.py
3c497499e55bfbd057b309264bf727df897b56fd
[]
no_license
https://github.com/brianlee389/twilioadapter
f3246e2cb29b2d80a3de32f1033317ea1bb46be5
26628df29484b4f7078185f0d5a4c04c75e9db2a
refs/heads/master
2023-01-12T08:17:03.828937
2016-04-17T08:07:55
2016-04-17T08:07:55
56,425,799
0
0
null
false
2022-12-26T20:15:04
2016-04-17T08:06:33
2016-04-17T08:09:43
2022-12-26T20:15:02
24
0
0
17
Python
false
false
#!/usr/bin/env python import os import sys from flask import Flask, request, redirect, jsonify from twilio.rest import TwilioRestClient from twilio.util import RequestValidator # Find these values at https://twilio.com/user/account account_sid = "ACebc67b30866a5d17990ecc0ea0951953" auth_token = "5332e7343302db39102d795722a3e867" # secret - 5nnti0xNBNq8u2DUadAzuNztNjlkGF7C app = Flask(__name__) @app.route("/", methods=['GET']) def send_text(): phone = request.args.get("phone") level = request.args.get("level") score = request.args.get("score") message = "" if phone is None or level is None or score is None: message = message + "Missing arguments" return (message, 404, "") phone = str(phone) level = int(level) score = int(score) if len(phone) != 9 and len(phone) != 10: message = message + "Invalid phone number" return (message, 404, "") leveldescription = "You passed level 1" if level == 1: leveldescription = "You passed level 1" elif level == 2: leveldescription = "You passed level 2" elif level == 3: leveldescription = "You passed level 3" else: message = message + "Invalid level number." return (message, 404, "") leveldescription = str(leveldescription) + " with a score of " + str(score) client = TwilioRestClient(account_sid, auth_token) message = client.messages.create(to=phone, from_="+17325154058", body=leveldescription) return ('All good', 200, "") if __name__ == "__main__": app.run(debug=True, host='0.0.0.0', port=5000) #if __name__ == "__main__": # os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gettingstarted.settings") # from django.core.management import execute_from_command_line # execute_from_command_line(sys.argv)
UTF-8
Python
false
false
1,794
py
3
main.py
1
0.710702
0.658863
0
59
29.389831
88
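A leaner version of the send path in the main.py record above, with the account credentials pulled from the environment rather than hardcoded; this uses the same legacy `TwilioRestClient` API the script imports, and the phone numbers are placeholders:

import os
from twilio.rest import TwilioRestClient

client = TwilioRestClient(os.environ["TWILIO_ACCOUNT_SID"],
                          os.environ["TWILIO_AUTH_TOKEN"])
message = client.messages.create(to="+15555550100",
                                 from_="+15555550101",
                                 body="You passed level 1 with a score of 42")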
kpapag/hua_hospitalServices
5,480,378,316,195
2a02489820d17f2a1ae3f6035910eae241002821
023a951bad97aea0b0aba746489805d9353a7c5a
/VirtualEnviromentDjango/hospitalPoject/__init__.py
3b7f2c4e43239afdf43a2fcbb7bcf7706e2b27ec
[]
no_license
https://github.com/kpapag/hua_hospitalServices
a0b4f37f871c4d14836cb0a3f80494f132a32cd3
4303a68d18630c16b7ef842cfd712900cbe406e2
refs/heads/master
2021-05-29T15:45:25.945523
2015-06-15T18:50:29
2015-06-15T18:50:29
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
__author__ = 'abaoubas'
UTF-8
Python
false
false
24
py
78
__init__.py
31
0.583333
0.583333
0
1
23
23
HutchieV/UIRS
16,372,415,353,877
45e87a4385c3a144cfcb601b820ced0f384fb15d
a879bfe59bcf8b820e52d4a7b42b83a742d970c8
/data/print_decorator.py
ffc1d3a9238600b494a276cdf9f867fc472627f1
[ "Apache-2.0" ]
permissive
https://github.com/HutchieV/UIRS
7ec9f223197de9ef1a42490dc5b6a7bf9326938e
a9d66b6890986445f9060c6e579e8af70d814ada
refs/heads/main
2023-04-11T04:48:35.038345
2021-04-21T14:35:20
2021-04-21T14:35:20
329,394,319
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from datetime import datetime class PrintDecorator(): """ Contains a print decorator for printing time, thread/class, and debug messages. """ def __init__(self, sid=None, pd=False): if sid: self.script_id = "[{}]".format(sid) else: self.script_id = "" self.print_debug = pd def get_time_str(self, v): if v: return "[{}]".format(datetime.now().strftime('%H:%M:%S')) else: return "" def get_sid(self, v): if v: return self.script_id else: return "" def print_decorator(self, func): """ Decorator used to prepend the current time to the start of all calls to print() """ def wrapped_func(*args, **kwargs): debug_id = "" start_chars = "" print_time = True print_sid = True if "debug" in kwargs: del kwargs["debug"] if self.print_debug: debug_id = " *** debug ***" else: return if "time" in kwargs: print_time = kwargs["time"] del kwargs["time"] if "start" in kwargs: start_chars = kwargs["start"] del kwargs["start"] if "title" in kwargs: print_sid = kwargs["title"] del kwargs["title"] return func("{}{}{}{}".format(start_chars, self.get_time_str(print_time), self.get_sid(print_sid), debug_id), *args, **kwargs) return wrapped_func def set_debug(self, debug): """ Sets whether or not this class prints debug messages. """ self.print_debug = debug def get_debug(self): return self.print_debug
UTF-8
Python
false
false
1,692
py
20
print_decorator.py
15
0.524823
0.524823
0
71
22.84507
72
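The class in the print_decorator.py record above is meant to wrap `print` itself; a small usage sketch under that assumption (the `worker-1` id and log lines are illustrative):

# Assumes the PrintDecorator class above is importable as shown.
pd = PrintDecorator(sid="worker-1", pd=True)
log = pd.print_decorator(print)

log("starting up")                 # -> [12:34:56][worker-1] starting up
log("verbose detail", debug=True)  # only printed because pd=True enabled debug
log("no timestamp", time=False)    # -> [worker-1] no timestamp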
dinopetrone/chelseacourtdesigns
18,915,035,989,384
a5a8404c74e021f76917fbafd8dbd096f3b855e7
b650950faa18e9104b17135eba6778247f4de422
/www/home_carousel/models.py
59e637d76e25371dbe9b81b4c53de3690726a2eb
[]
no_license
https://github.com/dinopetrone/chelseacourtdesigns
f378bd79667f95000eb9b949c70feb84165f01c2
edce009401ec3e292304bef7519db1a99a3eb8d3
refs/heads/master
2021-06-11T10:28:06.450297
2012-08-03T03:49:46
2012-08-03T03:49:46
5,276,188
0
0
null
false
2021-06-10T17:39:37
2012-08-02T18:28:11
2014-04-19T02:29:51
2021-06-10T17:39:35
364
1
0
1
Python
false
false
from django.db import models class Carousel(models.Model): large_img = models.ImageField(upload_to='photos/%d/%m/%h/%s') def __unicode__(self): return 'Carousel Item ' + str(self.pk)
UTF-8
Python
false
false
199
py
32
models.py
21
0.658291
0.658291
0
6
32.333333
65
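Django passes `upload_to` through `strftime`, so the portable placeholders are codes like `%Y/%m/%d`; the `%h`/`%s` directives in the record above are platform-dependent C-library extensions. A conventional sketch of the same field:

from django.db import models

class Carousel(models.Model):
    # %Y/%m/%d are portable strftime codes, e.g. photos/2012/08/03/
    large_img = models.ImageField(upload_to='photos/%Y/%m/%d')

    def __unicode__(self):
        return 'Carousel Item ' + str(self.pk)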
satchab/aa
936,302,904,518
239d3da321579dfc3b01da85dd27546c6ab4c5d5
d40bb0f6a9e94f6e7678384eba0076d163f69008
/Project/__init__.py
7bef3b21943fccef11dd890b6683588e331e73ae
[]
no_license
https://github.com/satchab/aa
0f0bd2f8f5f95a82173b670d1e9e6a5ac9bc221a
e70c2348318737519cb0bae0896a1e0c35273b5e
refs/heads/master
2022-12-30T16:52:05.126878
2019-10-29T06:00:28
2019-10-29T06:00:28
212,384,377
0
1
null
false
2022-12-15T14:39:19
2019-10-02T16:05:59
2019-10-29T06:25:03
2019-10-29T06:24:54
10,016
0
1
1
Python
false
false
from flask import Flask, request app = Flask(__name__) @app.route('/') def hello(): return 'hello tam', 200 @app.route('/webhook', methods=['POST', 'GET']) def webhook(): if request.method == 'POST': print(request.json) return request.json, 200 elif request.method == 'GET': return 'OK', 200
UTF-8
Python
false
false
337
py
2
__init__.py
2
0.593472
0.566766
0
15
21.333333
47
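The webhook in the record above can be exercised with Flask's built-in test client; a small sketch assuming the module is importable as `app` and a Flask version (1.1+) that can serialize dict returns:

# Hypothetical test against the Flask app above.
client = app.test_client()

resp = client.post('/webhook', json={'events': []})
print(resp.status_code, resp.get_json())   # 200 {'events': []}

resp = client.get('/webhook')
print(resp.status_code)                    # 200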
abdallah-nasir/advancing-the-blog-blog-api
19,507,741,466,892
7aad40b0d5f8f78f44efca9f48142a8b86e009a1
7fea6a1c0a197f184f48df31e2bb72d64a2ff128
/blog/posts/forms.py
40003c9e8cf25dede89a39a42acbb6175ab73952
[]
no_license
https://github.com/abdallah-nasir/advancing-the-blog-blog-api
daba6809118675eb039e8c7f8f8fb6bfe55eea0f
f1eb9bc50c40aef877c1b5864b45caa3c6cec191
refs/heads/main
2023-04-08T05:35:04.813446
2021-04-14T12:21:54
2021-04-14T12:21:54
334,929,607
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import forms from .models import * class PostForm(forms.ModelForm): class Meta: model = Post fields = ["title","body","image"] class CommentForm(forms.ModelForm): content = forms.CharField(widget=forms.Textarea,label="") class Meta: model = Comments fields = ["content"]
UTF-8
Python
false
false
344
py
23
forms.py
15
0.616279
0.616279
0
15
21.066667
61
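A typical way the ModelForms in the record above get used in a view, sketched under the assumption of the `Post` model and template shown here (the URL name and template path are illustrative):

# Hypothetical view using PostForm above.
from django.shortcuts import render, redirect

def new_post(request):
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect('post_list')   # assumed URL name
    return render(request, 'posts/form.html', {'form': form})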
kishkash555/biu-research
19,490,561,608,736
a7624466236bd66867a8fe6961100d79defad439
a87dba2cfe9cbbc93906de64f7d3ae49dd0d3ea5
/HashNet/HashNet.py
aafbbaf4ce245576bdc5022000eb11572462bc1a
[]
no_license
https://github.com/kishkash555/biu-research
cb7340fff82fe47356b6a532bc2fb239bd977a74
0f38e5089c97ee1bf11bb76e8825c1fbb048b5d5
refs/heads/master
2020-04-26T17:24:04.871408
2019-06-24T12:33:58
2019-06-24T12:33:58
173,711,836
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import dynet as dy import xxhash as xx import numpy as np from random import randint from collections import namedtuple, Counter layer_namedtuple = namedtuple('layer','g,w,b,m,n,hf,type'.split(',')) def hashed_matrix(k,m,n,pc): """ create a hashing dynet matrix """ def hashed_phi(a, hf, N, K): r""" creates a phi matrix from activations a using hash function hf each entry of phi is a sum of the corresponding a's $[\phi(\\mathbb{a})]_k = \sum \limits_{j:h(i,j)=k} a_j$ :param a: a (dynet) vector of inputs or activations from previous layer :param hf: a hash function [0, len(a)] -> len(w) :param N: number of phi columns (size of output) :param K: number of phi rows (size of parameter vector) :return: a dynet matrix phi which can be multiplied by w """ # phi = [list() for _ in range(m)] M = a.dim()[0][1] a_hits = Counter() phi = [] for k in range(K): phi_row = [] for i in range(N): relevant_indexes = [j for j in range(M) if hf(i,j)==k] a_hits.update(relevant_indexes) if len(relevant_indexes): phi_row.append(dy.esum([a[0][j] for j in relevant_indexes])) else: phi_row.append([]) phi.append(phi_row) return phi def evaluate_hash_layer(a, hf, W, N): M = a.dim()[0][1] holder = [] for j in range(M): cur = [] for i in range(N): cur.append(a[0][j]*W[0][hf(i,j)]) # W is stored as a row vector holder.append(dy.esum(cur)) ret = dy.reshape(dy.concatenate(holder),(1, N)) return ret def eval_network(params, train_x, train_y): layer_in = dy.inputTensor(train_x[np.newaxis]) for layer in params: g = layer.g # the nonlinearity W, b = layer.w, layer.b m, n = layer.m, layer.n # the input and output sizes if layer.type == 'normal': layer_out = layer_in * W + b layer_out = g(layer_out) elif layer.type == 'hashed': layer_hf = layer.hf # phi = hashed_phi(layer_in, layer_hf, n, W.dim()[0][1]) layer_out = evaluate_hash_layer(layer_in, layer_hf, W, n) layer_out = g(layer_out) elif layer.type == 'final': layer_out = layer_in * W + b break layer_in = layer_out return layer_out def calc_loss(params, train_x, train_y): layer_out = eval_network(params, train_x, train_y) loss = dy.pickneglogsoftmax(dy.transpose(layer_out),train_y) return loss, layer_out def network1(m,n): """ a 3- layer MLP of modest width :param m: the width of the input :param n: the width of the output """ g = dy.tanh pc = dy.ParameterCollection() d = 20 w1 = pc.add_parameters((m,d)) b1 = pc.add_parameters((1,d), init=0.) w2 = pc.add_parameters((d,d)) b2 = pc.add_parameters((1,d), init=0.) w3 = pc.add_parameters((d,n)) b3 = pc.add_parameters((1,n), init=0.) layers = [ layer_namedtuple(g,w1,b1,m,d,None,'normal'), layer_namedtuple(g,w2,b2,d,d,None,'normal'), layer_namedtuple(None,w3,b3,d,n,None,'final'), ] return layers, pc def network2(m,n): """ a 3- layer MLP with the middle layer using hashing :param m: the width of the input :param n: the width of the output :param k: the number of parameters k in the hashed matrix """ g = dy.tanh pc = dy.ParameterCollection() d = 20 k = 40 # represents 90% compression w1 = pc.add_parameters((m,d)) b1 = pc.add_parameters((1,d), init=0.) w2 = pc.add_parameters((1,k)) b2 = pc.add_parameters((1,d), init=0.) w3 = pc.add_parameters((d,n)) b3 = pc.add_parameters((1,n), init=0.) hf = make_hash(d,d,k) layers = [ layer_namedtuple(g,w1,b1,m,d,None,'normal'), layer_namedtuple(g,w2,b2,d,d,hf,'hashed'), layer_namedtuple(None,w3,b3,d,n,None,'final'), ] return layers, pc def train_network(train_data, dev_data, pc, params, out_file=None): epochs = 100 trainer = dy.SimpleSGDTrainer(pc) for ep in range(epochs): i = 0 train_loss = 0. 
train_good = 0 # print("EPOCH {}".format(ep)) np.random.shuffle(train_data) for train_x, train_y in train_data: dy.renew_cg() loss, scores = calc_loss(params, train_x, train_y) train_loss += loss.scalar_value() pred_class = scores.npvalue() pred_class = np.argmax(pred_class) train_good += pred_class == train_y loss.backward() trainer.update() i += 1 #if i % 100 == 1: dev_loss, dev_acc = check_loss(dev_data, params) #print("epoch: {}\ttrain_loss: {:.4f}\tdev loss: {:.4f}\tacc: {:.2f}".format(i, train_loss, dev_loss, dev_acc)) msg = "epoch: {}\ttrain_loss: {:.4f}\ttrain_acc: {:.2f}\n".format(ep, train_loss, train_good/i) +\ "epoch: {}\tdev_loss: {:.4f}\tdev_acc: {:.2f}\n".format(ep, dev_loss, dev_acc) if out_file: out_file.write(msg) print(msg) def check_loss(dev_data, params): cum_loss = 0. good = 0 for train_x, train_y in dev_data: loss, score = calc_loss(params, train_x, train_y) cum_loss += loss.value() predicted_class = np.argmax(score.npvalue()) if predicted_class == train_y: good += 1 s = len(dev_data) return cum_loss/s, good/s def make_hash(m,n,k): """ create a hash function and return it the hash takes two input parameters, m and n, which determine the range allowed in the input 0..m-1, 0..n-1 the output is in the range 0..k-1 :param m: the possible number of the hash's first parameter :param n: the possible number of the hash's second parameter """ my_rand_string = ''.join([chr(randint(0,255)) for _ in range(4)]) def hf(i,j): if i >= m or j >= n: raise ValueError("check range: {} < {}, {} < {}?".format(i,m,j,n)) num = i * n + j h = xx.xxh32_intdigest(my_rand_string+ int_to_str(num)) return h % k return hf def int_to_str(n): bts = '' while n: bts += chr(n % 256) n = n - (n % 256) n = int(n/256) return bts
UTF-8
Python
false
false
6,399
py
26
HashNet.py
19
0.556962
0.540866
0
201
30.81592
119
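`make_hash` in the HashNet.py record above maps every (i, j) weight position into a small parameter vector of length k, which is the HashedNets weight-sharing trick. A numpy-only sketch of the same lookup, independent of the dynet code (the toy hash stands in for the module's xxhash-based one):

import numpy as np

def expand_hashed_weights(w, m, n, hf):
    """Materialize a virtual m x n matrix whose entries are shared slots of w."""
    W = np.empty((m, n))
    for i in range(m):
        for j in range(n):
            W[i, j] = w[hf(i, j)]      # many (i, j) pairs share one parameter
    return W

k, m, n = 8, 4, 5                      # 8 real parameters backing 20 virtual weights
w = np.random.randn(k)
hf = lambda i, j: (i * n + j) % k      # toy hash; the module uses xxhash instead
print(expand_hashed_weights(w, m, n, hf).shape)   # (4, 5)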
maleadt/tado-charts
10,359,461,159,576
85475d1df4009e14f59865869e7c64be4be0a0f4
0ef279313598b67ec4d56abc4f8a257290d69e1a
/bin/plot.py
786fa2a447656c80d872d37157288f8801df2238
[]
no_license
https://github.com/maleadt/tado-charts
82ad8aa7c43a2044ee209601a615c3d0cc1ccf76
11270c40d2d8a886b6d2b4c7ece3311050bdaca2
refs/heads/master
2021-01-23T01:08:04.810978
2017-11-15T12:31:17
2017-11-15T12:31:17
85,877,904
6
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3 import os import sys import datetime import pymysql import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import matplotlib.ticker as ticker import matplotlib.patches as mpatches from scipy.signal import savgol_filter import private if len(sys.argv) > 1: output_dir = sys.argv[1] else: output_dir = os.path.dirname(sys.argv[0]) from enum import Enum class Resolution(Enum): COARSE = 1 FINE = 2 def smooth(y, factor=Resolution.FINE): points = len(y) if points <= 3: return y if factor == Resolution.COARSE: window_length = 75 polyorder = 1 if factor == Resolution.FINE: window_length = 31 polyorder = 2 if window_length > points: window_length = (points if points%2 == 1 else points-1) if polyorder >= window_length: polyorder = window_length - 1 return savgol_filter(y, window_length, polyorder) def plot(zone, timestamps, values, time_lower, time_upper, name): outsideTemperature, setpoint, temperature, humidity, heatingpower = values # mask heatingpower==0 for clarity heatingpower = np.ma.masked_where(heatingpower == 0, heatingpower) # initialize fig, ax1 = plt.subplots(figsize=(12,5)) # temperatures ax1.set_xlim([time_lower, time_upper]) ax1.xaxis.set_major_locator(mdates.HourLocator()) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H')) ax1.set_ylabel('temperature (°celsius)') ax1.yaxis.set_major_locator(ticker.MultipleLocator(1)) ax1.yaxis.grid(alpha=0.25) ax1.plot(timestamps, smooth(outsideTemperature, Resolution.COARSE), label="Outside", linestyle="solid", color="gray") ax1.plot(timestamps, setpoint, label="Setpoint", linestyle="solid", color="lime", alpha=.5) ax1.plot(timestamps, smooth(temperature), label="Measured", linestyle="solid", color="orangered") ax1.legend(loc='lower left', bbox_to_anchor=(0, -0.175, 1, 0), ncol=3, fancybox=True, shadow=True) # percentages ax2 = ax1.twinx() ax2.set_xlim([time_lower, time_upper]) ax2.set_ylabel('percentage') ax2.set_ylim([0,100]) ax2.yaxis.set_major_locator(ticker.MultipleLocator(10)) p1 = ax2.plot(timestamps, smooth(humidity), label="Humidity", linestyle="solid", color="darkcyan", alpha=.1,) ax2.fill_between(timestamps, 0, smooth(humidity), color='darkcyan', alpha=.1) p2 = ax2.plot(timestamps, heatingpower, label="Heating power", linestyle="solid", color="orange", alpha=.15) ax2.fill_between(timestamps, 0, heatingpower, color='orange', alpha=.15) # the fill_between can't be represented directly in the legend, # so create proxy agents which don't live in the plot p1_proxy = mpatches.Patch(color=p1[0].get_color(), alpha=p1[0].get_alpha(), label=p1[0].get_label()) p2_proxy = mpatches.Patch(color=p2[0].get_color(), alpha=p2[0].get_alpha(), label=p2[0].get_label()) ax2.legend(handles=[p1_proxy, p2_proxy], loc='lower right', bbox_to_anchor=(0, -0.175, 1, 0), ncol=2, fancybox=True, shadow=True) # finalize now = datetime.datetime.now() ax1.annotate('Last update: {}'.format(now.strftime("%H:%M")), fontsize=10, color="gray", xy=(1, 1), xycoords='axes fraction', horizontalalignment='right', verticalalignment='upper') plt.tight_layout() fig.subplots_adjust(bottom=0.15) plt.savefig("{}/{}.png".format(output_dir, name)) ## main mysql = pymysql.connect(host=os.getenv('MYSQL_HOST', private.mysql_host), user=os.getenv('MYSQL_USER', private.mysql_user), password=os.getenv('MYSQL_PASSWORD', private.mysql_password), db=os.getenv('MYSQL_DATABASE', private.mysql_database)) time_lower = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) time_upper = time_lower + datetime.timedelta(days=1) 
for zone in private.zones: with mysql.cursor() as cursor: sql = """ SELECT * FROM `{}` WHERE (timestamp >= %s and timestamp < %s) """.format(zone) cursor.execute(sql, (time_lower, time_upper)) data = cursor.fetchall() timestamps = np.asarray(list(map(lambda vals: vals[0], data))) def convert(val): if val is None: # we can't create an array with None, or operations like 'isfinite' break return float('NaN') else: return float(val) values = np.asarray(list(map(lambda vals: tuple(map(convert, vals[1:])), data))).T # plot daily chart plot(zone, timestamps, values, time_lower, time_upper, "{:04d}{:02d}{:02d}_{}".format(time_lower.year, time_lower.month, time_lower.day, zone)) mysql.close()
UTF-8
Python
false
false
4,979
py
9
plot.py
4
0.628164
0.60446
0
157
30.707006
104
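The `smooth` helper in the plot.py record above has to keep `window_length` odd and no larger than the series, and `polyorder < window_length`, which are hard requirements of `scipy.signal.savgol_filter`. A compact sketch of that clamping logic:

import numpy as np
from scipy.signal import savgol_filter

def safe_savgol(y, window_length=31, polyorder=2):
    points = len(y)
    if points <= 3:
        return y
    if window_length > points:                  # shrink the window to fit...
        window_length = points if points % 2 == 1 else points - 1
    if polyorder >= window_length:              # ...and keep the polynomial order valid
        polyorder = window_length - 1
    return savgol_filter(y, window_length, polyorder)

noisy = np.sin(np.linspace(0, 3, 40)) + np.random.normal(0, 0.1, 40)
print(safe_savgol(noisy).shape)   # (40,)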
Guyebai/Geeks_DongHua
2,886,218,030,937
564648be126a20b943e2f7ce1da93b8e8dfc9db9
a4905aa47878f35d4937a4834749caeee3f6e260
/fix/models.py
4bde2f395992bf30c4b917e7700fab9a43579a52
[]
no_license
https://github.com/Guyebai/Geeks_DongHua
e94d05d6dff8b572be4d385132eebf9718c80f8d
d6ca97c0176218948d2f182bcaa6a564c6d7326d
refs/heads/master
2020-09-10T04:04:26.650834
2017-09-26T10:27:58
2017-09-26T10:27:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding=utf-8 from __future__ import unicode_literals from django.db import models from django.utils import timezone from myuser.models import User from myuser.constants import Sex_Choice from util import format_time class Fault(models.Model): name = models.CharField('故障', max_length=255) order = models.IntegerField('排序', null=True, blank=True) class Meta: verbose_name = '故障列表' verbose_name_plural = verbose_name ordering = ('order',) def __str__(self): return self.name def __unicode__(self): return '%s' % self.name class Fix(models.Model): # 预约信息 name = models.CharField('姓名', max_length=255) dorm_number = models.IntegerField(u'楼号') tel = models.CharField('电话', max_length=255) fault = models.ForeignKey(Fault, verbose_name='故障') mark = models.TextField('备注', blank=True, default='') appointment_time = models.DateTimeField(u'预约时间', default=timezone.now) # 维修信息 model = models.CharField(max_length=255, null=True, blank=True) fixer = models.ForeignKey(User, verbose_name='修理员', null=True, default=None) fix_time = models.DateTimeField('修理时间', blank=True, null=True) is_fix = models.BooleanField('是否维修', default=False) class Meta: verbose_name = '预约名单' verbose_name_plural = verbose_name ordering = ('is_fix', '-appointment_time') def __str__(self): return self.name def __unicode__(self): return '%s' % self.name def get_fault(self): return self.fault.name def get_appointment_time(self): return format_time(self.appointment_time) def get_fix_time(self): return format_time(self.fix_time) class Recruit(models.Model): name = models.CharField('姓名', max_length=255) sex = models.IntegerField('性别', choices=Sex_Choice) tel = models.CharField('电话', max_length=255) major = models.CharField('专业', max_length=255) desc = models.TextField() reason = models.TextField(blank=True, null=True) # Create your models here.
UTF-8
Python
false
false
2,164
py
96
models.py
70
0.655977
0.645287
0
72
27.583333
80
wingtonbrito/test-build
12,661,563,602,197
f52f6f7416c4e772e8cf45efd04cedcb16475a61
d2fab1990bd12cf401cdf93766f2bef3d76c25f9
/src/dataflow/libs/log.py
64a1484af9659f9e6a503a0693f8a29b02198c72
[]
no_license
https://github.com/wingtonbrito/test-build
26fe3098ad6695e62c754459976f98a3e399e2fc
261c832b569f52ec4ad370c0d72afce2a5b2c234
refs/heads/master
2023-01-06T13:38:02.315272
2020-06-09T21:04:09
2020-06-09T21:04:09
271,098,141
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import apache_beam as beam import logging class Log(beam.DoFn): def __init__(self, func='info'): self.__func__ = getattr(logging.getLogger(), func) def process(self, payload): self.__func__(f'>> logging payload: {payload}') yield payload
UTF-8
Python
false
false
273
py
154
log.py
135
0.615385
0.615385
0
11
23.818182
58
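The `Log` DoFn in the record above plugs into a pipeline via `beam.ParDo`; a minimal sketch with the default DirectRunner, assuming the class is importable from the same package:

import apache_beam as beam

# Logs each element through logging.warning and passes it through unchanged.
with beam.Pipeline() as p:
    _ = (p
         | beam.Create([{'id': 1}, {'id': 2}])
         | beam.ParDo(Log(func='warning')))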
JackPotte/snottywong
5,162,550,718,455
06db479219bef02fc2a451f4864f23079c3756ed
bf9c643237b8996ddee573bc3fa4bd0f899aab34
/cgi-bin/votecounter.cgi
1692919db2da261a89e3bd6b32d66e7e7edc2aa8
[ "MIT" ]
permissive
https://github.com/JackPotte/snottywong
c846197d66e94f59f2570f8dd4952fe44ca20b38
c4f5c3f5322fb73669c16dbd374ff60b3fcd4970
refs/heads/master
2021-01-19T23:40:41.628010
2017-05-28T23:37:18
2017-05-28T23:37:18
89,007,737
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- #TODO remove commented out material #TODO Logging import sys import os import traceback import cgi import re import urllib import htmllib import datetime import MySQLdb page = "" def main(): global discussiontype global page print """<!doctype html> <HTML> <HEAD> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> <LINK href="greyscale.css" rel="stylesheet" type="text/css"> <LINK href="menubar3.css" rel="stylesheet" type="text/css"> <TITLE>AfD Stats</TITLE> </HEAD> <BODY id="no"> <script type="text/javascript" src="/menubar.js"></script> <br> <div style="width:875px;"> <H1>AfD Vote Counter</H1>""" try: form = cgi.FieldStorage() if "page" in form: page = form["page"].value else: errorout("Missing required parameter: page.") fast = True if "fast" in form: if form["fast"].value.lower() in ["no", "false", "0", "slow"]: fast = False if page == "": errorout("Missing required parameter: page.") page = urllib.unquote(page).replace("_", " ") if os.environ["REMOTE_ADDR"].startswith(("89.151.116.5", "46.236.24", "46.236.7")): errorout("Your IP address has been flagged for potential abuse. Please post a message to User talk:Scottywong on the English Wikipedia, or alternatively send an email to snottywong.wiki@gmail.com to prove that you are a human, and to explain why you've been consistently making so many queries on this tool.") f = open("votecounterlog.txt", "a") f.write("<log><ip>" + os.environ["REMOTE_ADDR"] + "</ip><page>" + page + "</page><timestamp>" + datetime.datetime.today().strftime("%m/%d/%y %H:%M:%S") + "</timestamp></log>\n") f.close() if not page.lower().startswith("wikipedia:articles for deletion/"): errorout("Requested page is not an AfD.") db = MySQLdb.connect(db='enwiki_p', host="enwiki.labsdb", read_default_file=os.path.expanduser("~/replica.my.cnf")) cursor = db.cursor() data = APIget(page) data = unescape(data) data = re.sub("<(s|strike|del)>.*?</(s|strike|del)>", "", data, flags=re.IGNORECASE|re.DOTALL) #remove all struck-through text, so that it is ignored #TODO remove commented out material #votes = re.findall("\n.*?'{3}?.*?'{3}?.*?\(UTC\)", data[data.find("=="):], re.IGNORECASE) """ votes = [] tempdata = data[data.find("=="):] while True: part = tempdata.partition("(UTC)") if not part[1]: break votes.append(part[0] + part[1]) tempdata = part[2] """ votes = [] voteiter = re.finditer("'{3}?.*?'{3}?.*?((\[\[User.*?\]\].*?\(UTC\))|(\{\{unsigned.*?\}\})|(<!--\s*Template:Unsigned\s*-->))", data[data.find("=="):], re.IGNORECASE) for i in voteiter: votes.append(i.group(0)) errors = 0 dupvotes = 0 votedict = {} spa = {} for vote in votes: try: voter = re.match("\[\[User.*?:(.*?)(?:/|#|\||(?:\]\]))", vote[vote.lower().rfind("[[user"):], re.IGNORECASE) if not voter: voter = re.search("\{\{unsigned\|(.*?)(\||(\{\{))", vote, re.IGNORECASE) if not voter: voter = re.search("<span class=\"autosigned\">.*?\[\[User:(.*?)\|.*?<!--\s*Template:Unsigned\s*-->", vote, re.IGNORECASE) if not voter: continue voter = voter.group(1).strip() bolded = re.search("'''(.*?)'''", vote) if not bolded: #print "NOT BOLDED - " + cgi.escape(vote) + "<br><br>" continue votetype = parsevote(bolded.group(1)) if votetype == None: #Unparseable #print "UNPARSEABLE - " + bolded.group(1) + " - " + cgi.escape(vote) + "<br><br>" continue if voter in votedict.keys(): dupvotes += 1 votedict[voter] = votetype if not fast: editcount = getEditCount(voter, cursor) if editcount < 500 and editcount > 0: if voter not in spa.keys(): spa[voter] = editcount #print 
"SUCCESSFUL " + votetype + " - " + cgi.escape(vote) + "<br><br>" except: errors += 1 print sys.exc_info()[0] print "<br>" print traceback.print_exc(file=sys.stdout) print "<br>" print vote print "<br>" continue print "<H2>" + page + "</H2>" if len(votedict) == 0: errorout("No votes were found on this page.") nominator = APIfirsteditor(page) if nominator: if nominator in votedict.keys(): dupvotes += 1 else: votedict[nominator] = "Delete" print "<ul>" for v in ["Keep", "Speedy Keep", "Delete", "Speedy Delete", "Merge", "Redirect", "Transwiki", "Userfy"]: if votedict.values().count(v): voterlist = [] for voter in votedict.keys(): if votedict[voter] == v: voterlist.append(voter) print "<li><b>" + v + " votes: " + str(votedict.values().count(v)) + "</b> <small>(" + ", ".join(voterlist) + ")</small>" print "</ul>" print "<BR>Found " + str(dupvotes) + " potential duplicate vote" + ("" if dupvotes==1 else "s") + ".<BR>" if spa.keys(): print "<BR>Potential SPA's (voters with less than 500 edits):\n<ul>\n" for s in spa.keys(): print "<li>" + s + " (" + str(spa[s]) + " edits) (voted " + votedict[s] + ")</li>" print "</ul>" if not spa.keys() and not fast: print "<BR>No potential SPA's found.</BR>" if fast: print '<BR><small><a href="votecounter.cgi?page=' + form['page'].value + '&fast=false">Click here to check for possible SPA\'s (can be slow for large AfD\'s)</a></small><BR>' if errors: print "<BR>Encountered " + str(errors) + " non-fatal errors while parsing this page.<BR>" print "<br><br><br><small>Disclaimer: This tool only searches for <b>bolded</b> votes in AfD's, and it may not even find those with 100% accuracy. Please do not rely on the accuracy of this tool for critical applications, and also keep in mind that consensus is not determined by counting votes.</small><br>" print '<br><small>Bugs, suggestions, questions? Contact the author at <a href="http://en.wikipedia.org/wiki/User_talk:Scottywong">User talk:Scottywong</a></small><br>' print "</div></BODY>\n</HTML>" except: errors += 1 print sys.exc_info()[0] print "<br>" print traceback.print_exc(file=sys.stdout) print "<br>" def APIget(p): try: u = urllib.urlopen("http://en.wikipedia.org/w/api.php?action=query&prop=revisions&titles=" + urllib.quote(p).replace(" ", "_") + "&rvprop=content&format=xml") xml = u.read() u.close() if re.search(r'<page .*? missing="".*?/>', xml): errorout("Page doesn't exist: " + p) text = re.search(r'<rev.*?xml:space="preserve">(.*?)</rev>', xml, re.DOTALL).group(1) return text except: #print sys.exc_info()[0] #print "<br>" #print traceback.print_exc(file=sys.stdout) #print "<br>" errorout("Error getting content of page: " + p) def APIfirsteditor(p): try: u = urllib.urlopen("http://en.wikipedia.org/w/api.php?action=query&prop=revisions&titles=" + urllib.quote(p).replace(" ", "_") + "&rvlimit=1&rvprop=timestamp|user&rvdir=newer&format=xml") xml = u.read() u.close() return re.search("<rev user=\"(.*?)\".*?/>", xml).group(1) except: #print sys.exc_info()[0] #print "<br>" #print traceback.print_exc(file=sys.stdout) return None def getEditCount(username, cursor): try: cursor.execute("SELECT COUNT(*) FROM revision_userindex WHERE rev_user_text=%s;", (username)) ec = cursor.fetchall()[0][0] return int(ec) except: return 0 def unescape(s): p = htmllib.HTMLParser(None) p.save_bgn() p.feed(s) return p.save_end() def errorout(errorstr): print "ERROR: " + errorstr + "<br><br>Please try again.<br>" print '<br><small>Bugs, suggestions, questions? 
Contact the author at <a href="http://en.wikipedia.org/wiki/User_talk:Scottywong">User talk:Scottywong</a></small><br>' print "</div></BODY>\n</HTML>" sys.exit(0) def parsevote(v): v = v.lower() if "comment" in v: return None elif "note" in v: return None elif "merge" in v: return "Merge" elif "redirect" in v: return "Redirect" elif "speedy keep" in v: return "Speedy Keep" elif "speedy delete" in v: return "Speedy Delete" elif "keep" in v: return "Keep" elif "delete" in v: return "Delete" elif "transwiki" in v: return "Transwiki" elif ("userfy" in v) or ("userfied" in v) or ("incubat" in v): return "Userfy" else: return None main()
UTF-8
Python
false
false
9,643
cgi
61
votecounter.cgi
30
0.530022
0.522348
0
253
37.114625
322
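The `parsevote` routine in the votecounter.cgi record above is a keyword classifier over the bolded vote text, and its check order matters ("merge" wins over "redirect", "speedy keep" over "keep"). A usage sketch showing the expected behaviour:

# Expected behaviour of parsevote() above:
for bolded in ["Speedy keep", "Strong Delete", "Merge and redirect", "Comment"]:
    print(bolded, "->", parsevote(bolded))
# Speedy keep -> Speedy Keep
# Strong Delete -> Delete
# Merge and redirect -> Merge   ("merge" is checked before "redirect")
# Comment -> None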
justinro-underscore/MiniInfiniteRunner
1,795,296,351,764
9f2f8d38147eb1ee33bbafb7e1f401917a1db7bd
909e3310cdf4c03a771c79919301a502b47b4d88
/ground.py
0444568c160512f521bc6024dfe49de890bbd587
[]
no_license
https://github.com/justinro-underscore/MiniInfiniteRunner
95383cc96c1977a979e59707fc08dce62546ba28
d6503d217cea7165d131e00b61d1a9bd44c4d934
refs/heads/main
2023-04-10T19:03:03.637311
2021-04-20T02:39:37
2021-04-20T02:39:37
358,453,231
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from random import random from moving_objects.ground_mark import GroundMark from moving_objects.cactus import Cactus from constants import height, width, ground_height, init_ground_speed, ground_mark_probability, max_ground_mark_length, init_cactus_space, cactus_space_increment class Ground: def __init__(self): self.ground_speed = init_ground_speed self.ground_marks = [GroundMark()] self.curr_ground_length = 0 self.generating_ground_marks = False self.cacti = [] self.cacti_space = init_cactus_space self.cactus_wait_count = self.cacti_space def __move_objects(self): for moving_object in self.ground_marks + self.cacti: moving_object.move(self.ground_speed) if len(self.ground_marks) > 0 and self.ground_marks[0].is_offscreen(): self.ground_marks = self.ground_marks[1:] if len(self.cacti) > 0 and self.cacti[0].is_offscreen(): self.cacti = self.cacti[1:] def __generate_ground_marks(self): # If not currently generating a ground mark... if not self.generating_ground_marks: # Set the ground mark length to what is currently shown on screen self.curr_ground_length += self.ground_speed # Use linear interpolation to see if we should cut off the ground mark at its current length probability = self.curr_ground_length / max_ground_mark_length end_mark = random() < probability # If we should cut off the mark... if end_mark: # Set the ground mark's length to its current length self.ground_marks[len(self.ground_marks) - 1].length = self.curr_ground_length # Reset to prepare for generating a new ground mark self.generating_ground_marks = True # If currently generating a ground mark and we should place a new ground mark if self.generating_ground_marks and random() < ground_mark_probability: # Append a new ground mark to the front of the queue self.ground_marks += [GroundMark()] self.curr_ground_length = 0 self.generating_ground_marks = False def __generate_cacti(self): self.cactus_wait_count -= self.ground_speed if self.cactus_wait_count < 0: self.cacti += [Cactus()] self.cactus_wait_count = self.cacti_space def update(self): self.__move_objects() self.__generate_ground_marks() self.__generate_cacti() def increase_speed(self): self.ground_speed += 1 self.cacti_space += (cactus_space_increment * self.ground_speed) // 3 def render(self, draw): draw.line((0, height - ground_height, width, height - ground_height), fill=255, width=1) for ground_mark in self.ground_marks: ground_mark.render(draw) for cactus in self.cacti: cactus.render(draw)
UTF-8
Python
false
false
2,732
py
10
ground.py
9
0.68631
0.680088
0
71
37.478873
161
GraphicalDot/Sportsunity
16,681,652,994,802
c66dcf83483e769a9aaacd6c8df5fa07e0b7f109
728325025dcd9cef1e94298ddcf0163fe26e8503
/leagues_elasticsearch.py
86b5694910b6c1273edbf6c6db20430336e56ed8
[]
no_license
https://github.com/GraphicalDot/Sportsunity
6bafdf21f93c14b5e8e58ad196424c530ecd5e21
085d7237b849dfec1e59b4236eef10e0ab87179c
refs/heads/master
2021-03-19T15:18:09.553691
2016-04-11T09:03:50
2016-04-11T09:03:50
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python

import requests
import json
from pyfiglet import figlet_format
from termcolor import cprint
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import connection

ES_CLIENT = connection.get_elastic_search_connection()


class GetTeams:
    def __init__(self, renew_indexes=False):
        self.settings = {'settings': {'analysis': {
            'analyzer': {'custom_analyzer': {'filter': ['lowercase', 'asciifolding', 'edge_ngram'],
                                             'tokenizer': 'ngram_tokenizer',
                                             'type': 'custom'}},
            'filter': {'edge_ngram': {'max_gram': '100',
                                      'min_gram': '2',
                                      'type': 'edge_ngram'}},
            'tokenizer': {'ngram_tokenizer': {'max_gram': '100',
                                              'min_gram': '2',
                                              'token_chars': ['letter', 'digit'],
                                              'type': 'edgeNGram'}}}}}

        self.mappings = {'dynamic': 'strict',
                         'properties': {'league_autocomplete': {'analyzer': 'custom_analyzer', 'type': 'string'},
                                        'league_name': {'copy_to': ['league_autocomplete'], 'type': 'string'},
                                        'league_id': {'index': 'not_analyzed', 'type': 'string'},
                                        'season': {'index': 'not_analyzed', 'type': 'long'},
                                        'flag_image': {'index': 'not_analyzed', 'type': 'string'},
                                        'region': {'index': 'not_analyzed', 'type': 'string'},
                                        }}

        if not ES_CLIENT.indices.exists("leagues"):
            self.prep_teams_index()
            self.index_data()

        if renew_indexes:
            ES_CLIENT.indices.delete(index="leagues")
            self.prep_teams_index()
            self.index_data()

    def prep_teams_index(self):
        ES_CLIENT.indices.create(index="leagues", body=self.settings)
        ES_CLIENT.indices.put_mapping(index="leagues", doc_type="leagues",
                                      body={"leagues": self.mappings})
        a = "Mappings updated for {0}".format("leagues")
        cprint(figlet_format(a, font='starwars'), attrs=['bold'])

    def index_data(self):
        response = requests.get('http://52.74.75.79:8000/get_football_leagues')
        data = json.loads(response.content)
        for league in data['data']:
            print ES_CLIENT.index(index="leagues", doc_type="leagues", body=league)


if __name__ == "__main__":
    obj = GetTeams(renew_indexes=True)
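# NOTE (illustrative, not from the original file): a hypothetical sketch of
# querying the `league_autocomplete` field defined in the mapping above. The
# edge-ngram analyzer means a short prefix such as "pre" should match
# documents like "Premier League"; only the index/field names come from the
# original, the query itself is assumed.
#
#     ES_CLIENT.search(index="leagues", body={
#         "query": {"match": {"league_autocomplete": "pre"}}
#     })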
UTF-8
Python
false
false
3,431
py
63
leagues_elasticsearch.py
59
0.388808
0.382687
0
69
48.594203
140
anniezhn/CSC210
17,566,416,253,317
4ddc7555a14250bee4e06bff33ef791189a4dce2
41061be9fc4f639594c0d6d5002ad656d78f0276
/cgi-bin/delete.py
18e8e5ef1fa4fb98e71b8a49576957816d9b78c1
[]
no_license
https://github.com/anniezhn/CSC210
286f785821df0bef8e4143beaf188e7bc59b704a
716858302339fd6a2c471ca2550ca64dbe19c204
refs/heads/master
2016-09-06T21:51:04.490068
2014-12-10T05:54:33
2014-12-10T05:54:33
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python

import cgitb, cgi, os, Cookie, sys
import MySQLdb as mdb

#setup
cgitb.enable()

#retrieve username from cookie
cookie_monster = Cookie.SimpleCookie(os.environ['HTTP_COOKIE'])
username = cookie_monster['user'].value

conn = mdb.connect('localhost', 'tnichol1', 'TeamTAJ2!', 'tnichol1_CSC210') #connect to MySQL
cur = conn.cursor()

#expire all cookies
cookie_monster['session_id'] = ""
cookie_monster['user'] = ""
cookie_monster['session_id']['expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT' #apparently this is the convention
cookie_monster['session_id']['path'] = '/'
cookie_monster['user']['expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT' #apparently this is the convention
cookie_monster['user']['path'] = '/'

#remove user from database
cur.execute('SELECT UserID FROM Users WHERE Username=%s;', username)
userID = cur.fetchone()[0]
cur.execute("DELETE FROM Users WHERE Username=%s", username)
conn.commit()
cur.execute("DELETE FROM Passwords WHERE UserID=%s", userID)
conn.commit()

print "Content-type: text/html"
print cookie_monster
print
print "<html>"
print "<head><title>Your account has been deleted</title></head>"
print "<body>"
print "<h1>OK, " + username + ", your account has been deleted. Thank you for using our site.</h1>"
print "<p>Click here to go back to our homepage:</p>"
print '<p><a href="http://tnichols.rochestercs.org">Home Page</a></p>'
print "</body>"
print "</html>"
UTF-8
Python
false
false
1,402
py
42
delete.py
28
0.71398
0.690442
0
41
33.170732
106
th3seus/fbctf-load-test
5,368,709,123,107
d2fd13ef100cee1bb485bbcd3495fcdcaa2b1edf
51b743f2d87207df7b5163d1a20e29522289e26f
/test.py
a417d85ec12bbc780fc8277742b7da9f74f2c46e
[]
no_license
https://github.com/th3seus/fbctf-load-test
3420acdef18986ae8444af813a4d47c8de677ed2
be4fa0b483139f6a5fc7366cba3760a64dd4769e
refs/heads/master
2020-03-23T20:33:40.322553
2018-07-24T22:46:53
2018-07-24T22:46:53
142,048,388
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
import time


def team_login(driver, user, password):
    """ team login using username and password """
    login = driver.find_element_by_link_text("Login")
    login.click()
    # submit creds
    driver.find_element_by_name("team_name").send_keys(user)
    driver.find_element_by_name("password").send_keys(password)
    # find login button and submit
    login = driver.find_element_by_id("login_button")
    login.click()


def view_navigation_menu(driver):
    """ returns navigation menu element to be used with other navigation functions """
    menu = driver.find_element_by_css_selector(".nav-left")
    return menu


def view_tutorial(driver):
    """ opens up and steps through tutorial """
    nav = view_navigation_menu(driver)
    tutorial = driver.find_element_by_css_selector(".fb-init-tutorial")
    tutPages = 8
    # find nav bar, then click the link to open tutorial
    actions = ActionChains(driver)
    actions.move_to_element(nav)
    time.sleep(2)
    actions.click(tutorial)
    actions.perform()
    # step through tutorial pages
    for x in range(0, 9):
        print "at window %d" % x
        time.sleep(2)
        nextBtn = False
        while nextBtn == False:
            nextBtn = False
            nextBtn = WebDriverWait(driver, 3).until(find_tutorial_next_button)
            print nextBtn
            if EC.staleness_of(nextBtn) == False:
                actions.move_to_element(nextBtn)
                time.sleep(2)
                actions.click(nextBtn)
                time.sleep(2)
                actions.perform()
                # tutPages -= 1
            else:
                nextBtn = False


def find_tutorial_next_button(driver):
    print 'looking for button'
    btn = driver.find_element_by_css_selector(".cta--yellow")
    if btn:
        print "found button"
        return btn
    else:
        print 'did not find button'
        return False


driver = webdriver.Firefox()
user = "team1"
password = "password3"

# go to homepage
driver.get("https://52.53.207.37/index.php")
assert "Facebook CTF" in driver.title

# login
team_login(driver, user, password)
WebDriverWait(driver, 10).until(EC.title_contains("Facebook CTF | Gameboard"))
assert "Facebook CTF | Gameboard" in driver.title, "not loaded properly"

view_tutorial(driver)

"""
try:
    view_tutorial(driver)
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.LINK_TEXT, "Skip to play")))
finally:
    print "finally"
    time.sleep(15)
    driver.close()
"""
UTF-8
Python
false
false
2,824
py
2
test.py
2
0.660057
0.65085
0
93
29.365591
99
KieranHauser/TorontoCoffee
6,244,882,478,625
5bb2dc359451e2af06f48149fa129115c5a93f9b
399b7b74fda0d863d59234285aa5ab95efcc5a17
/shop/urls.py
70ba52273f7b35f846b828b394856e7476b3c827
[]
no_license
https://github.com/KieranHauser/TorontoCoffee
28e871013a35c6e6c3e6c9fec5ab4cd8f0487e6a
3571eafe13d0fd447a31fa4323caa0eccc8fbd8a
refs/heads/master
2021-08-07T07:35:25.578249
2017-11-07T21:20:10
2017-11-07T21:20:10
105,454,681
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView

from .views import ShopCreateView, ShopDetailView, ShopListView

urlpatterns = [
    url(r'^create/', ShopCreateView.as_view(), name='create'),
    url(r'^(?P<slug>[\w-]+)/$', ShopDetailView.as_view(), name='detail'),
    url(r'$', ShopListView.as_view(), name='list'),
]
UTF-8
Python
false
false
384
py
14
urls.py
13
0.705729
0.705729
0
10
37.4
73
hydrargyrum/pytat
12,232,066,877,578
716ad811843a07dd25104aa221d417267069bce7
494857bc8583ff83fb1e972aa2f460d287576670
/examples/sample2.py
882270c61c11c93d105b8f87f7e841905e79253b
[ "WTFPL" ]
permissive
https://github.com/hydrargyrum/pytat
3bcbf2ebbf62a4ca3fdd885729aea6f0ef5ca774
9a7796177990ae55aa4ea3686bd92d7e91fd3404
refs/heads/master
2020-03-27T05:35:46.204018
2018-11-11T14:04:11
2018-11-11T14:04:11
146,033,398
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys


def f():
    print(42)
    for i in range(2):
        print(4, 2)
    print(42, file=sys.stderr)
UTF-8
Python
false
false
111
py
15
sample2.py
13
0.531532
0.468468
0
7
14.571429
30
orange21cn/OrcTestToolsKit
6,640,019,454,947
fc6e6db131110562ebd61d804e088d76d3c60119
4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c
/OrcApi/Batch/BatchDefMod.py
afb6746181f6da20b52a345b533f664c66b265ad
[]
no_license
https://github.com/orange21cn/OrcTestToolsKit
eb7b67e87a608fb52d7bdcb2b859fa588263c136
69b6a3c382a7043872db1282df4be9e413d297d6
refs/heads/master
2020-04-15T07:30:35.485214
2017-09-30T06:16:17
2017-09-30T06:16:17
68,078,991
5
3
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

from datetime import datetime

from OrcLib.LibCommon import OrcString
from OrcLib.LibCommon import is_null
from OrcLib.LibException import OrcDatabaseException
from OrcLib.LibDatabase import TabBatchDef
from OrcLib.LibDatabase import gen_id
from OrcLib.LibDatabase import orc_db
from OrcLib.LibLog import OrcLog


class BatchDefMod(TabBatchDef):
    """
    Test data management
    """
    __session = orc_db.session

    def __init__(self):
        TabBatchDef.__init__(self)
        self.__logger = OrcLog("api.batch.mod.batch_def")

    def usr_search(self, p_cond=None):
        """
        :param p_cond:
        :return:
        """
        # Treat a missing condition as an empty one
        cond = p_cond if p_cond else dict()

        # LIKE-style query condition helper
        _like = lambda p_flag: "%%%s%%" % cond[p_flag]

        # db session
        result = self.__session.query(TabBatchDef)

        if 'id' in cond:
            # The query supports a list of ids as well as a single id
            if isinstance(cond["id"], list):
                result = result.filter(TabBatchDef.id.in_(cond['id']))
            else:
                result = result.filter(TabBatchDef.id == cond['id'])

        if 'pid' in cond:
            result = result.filter(TabBatchDef.pid == cond['pid'])

        if 'batch_no' in cond:
            result = result.filter(TabBatchDef.batch_no == cond['batch_no'])

        if 'batch_name' in cond:
            result = result.filter(TabBatchDef.batch_name.ilike(_like('batch_name')))

        if 'batch_desc' in cond:
            result = result.filter(TabBatchDef.batch_desc.ilike(_like('batch_desc')))

        for i in result.all():
            print i.to_json()

        return result.all()

    def usr_search_all(self, p_cond):
        """
        Search all batch nodes up to the root node
        :param p_cond:
        :return:
        """
        result = list()

        for _batch in self.usr_search(p_cond):
            if _batch not in result:
                # Get the root group of the current case
                _root = self.__get_root(_batch)
                # Get all children of the root group
                _tree = self.__get_tree(_root)
                # Add them to the result tree
                result.extend(_tree)

        return result

    def usr_search_tree(self, p_id):
        """
        Get a node and all of its children
        :param p_id:
        :return:
        """
        batch = self.usr_search(dict(id=p_id))

        if batch:
            return self.__get_tree(batch[0])
        else:
            return list()

    def usr_search_path(self, p_id):
        """
        Get the path up to the root node
        :param p_id:
        :return:
        """
        batch_data = self.__session.query(TabBatchDef).filter(TabBatchDef.id == p_id).first()

        batch_no = batch_data.batch_no if batch_data else None
        batch_pid = batch_data.pid if batch_data else None

        if batch_pid:
            return "%s.%s" % (self.usr_get_path(batch_pid), batch_no)
        else:
            return batch_no

    def __get_root(self, p_item):
        """
        :param p_item:
        :return:
        """
        if p_item.pid is None:
            return p_item

        _res = self.__session.query(TabBatchDef).filter(TabBatchDef.id == p_item.pid).first()

        if _res.pid is None:
            return _res
        else:
            return self.__get_root(_res)

    def __get_tree(self, p_item):
        """
        :param p_item:
        :return:
        """
        _tree = [p_item]
        _items = self.__session.query(TabBatchDef).filter(TabBatchDef.pid == p_item.id).all()

        for _item in _items:
            _tree.extend(self.__get_tree(_item))

        return _tree

    def usr_add(self, p_data):
        """
        Add a new batch definition
        :param p_data:
        :return:
        """
        node = TabBatchDef()

        # Create id
        node.id = gen_id("batch_def")

        # batch_no
        node.batch_no = self.__create_no()

        # batch_type
        node.batch_type = p_data['batch_type'] if 'batch_type' in p_data else None

        # pid
        node.pid = p_data['pid'] if 'pid' in p_data else None

        # batch_name
        node.batch_name = p_data['batch_name'] if 'batch_name' in p_data else ""

        # batch_desc, comment
        node.batch_desc = p_data['batch_desc'] if 'batch_desc' in p_data else ""
        node.comment = p_data['comment'] if 'comment' in p_data else ""

        # create_time, modify_time
        node.create_time = datetime.now()
        node.modify_time = datetime.now()

        try:
            self.__session.add(node)
            self.__session.commit()
        except:
            raise OrcDatabaseException

        return node

    def __create_no(self):
        """
        Create a no
        :return:
        """
        base_no = OrcString.get_data_str()

        for _index in range(100):

            if 10 < _index:
                batch_no = "%s%s" % (base_no, _index + 1)
            else:
                batch_no = "%s0%s" % (base_no, _index + 1)

            _item = self.__session.query(TabBatchDef).filter(TabBatchDef.batch_no == batch_no).first()

            if _item is None:
                return batch_no

        return 1

    def usr_update(self, p_cond):
        """
        Update
        :param p_cond:
        :return:
        """
        for t_id in p_cond:

            if "id" == t_id:
                continue

            _data = None if is_null(p_cond[t_id]) else p_cond[t_id]
            _item = self.__session.query(TabBatchDef).filter(TabBatchDef.id == p_cond['id'])
            _item.update({t_id: _data})

        self.__session.commit()

    def usr_delete(self, p_id):
        """
        Delete
        :param p_id:
        :return:
        """
        print "-=-=", p_id
        self.__session.query(TabBatchDef).filter(TabBatchDef.id == p_id).delete()
        self.__session.commit()

    def usr_get_path(self, p_id):

        _no = self.__session.query(TabBatchDef.batch_no).filter(TabBatchDef.id == p_id).first()[0]
        _pid = self.__session.query(TabBatchDef.pid).filter(TabBatchDef.id == p_id).first()[0]

        if _pid is None:
            return _no
        else:
            return "%s.%s" % (self.usr_get_path(_pid), _no)
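# NOTE (illustrative, not from the original file): a hypothetical usage sketch
# of BatchDefMod, assuming the OrcLib database session is configured.
# usr_search takes a plain dict of filters; a list-valued 'id' becomes IN(),
# and name/desc filters are case-insensitive LIKE matches.
#
#     mod = BatchDefMod()
#     batches = mod.usr_search(dict(batch_name="smoke"))   # ILIKE '%smoke%'
#     tree = mod.usr_search_tree(batches[0].id)            # node + children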
UTF-8
Python
false
false
6,285
py
240
BatchDefMod.py
212
0.512821
0.510697
0
236
24.944915
102
astrosilverio/PokeDB
3,040,836,859,093
8a62c1a828ddc4c8150db5f27e14165c52c1ba68
483fcc2e88ddd4828567606bc625c69d95306b7a
/pokedb/storage/__init__.py
6f44e28d976e65e232bb3a1622efdd0a6345bef1
[]
no_license
https://github.com/astrosilverio/PokeDB
c5e7eae67867deb6a17b1352a95d8de5ea3c8834
e2f811e6f135cb761f94faf315f2679273f6003f
refs/heads/master
2020-03-31T10:39:34.838719
2019-01-09T20:42:57
2019-01-09T20:42:57
152,143,406
1
0
null
true
2019-01-09T20:42:58
2018-10-08T20:37:11
2018-11-28T22:30:40
2019-01-09T20:42:58
61
0
0
0
Python
false
null
""" API to access layer: `get` `write` `sync` """ import os from pokedb.storage.pager import Pager from pokedb.storage.serializer import serialize, deserialize DBFILE = os.getenv('DBFILE', 'test.db') _pager = None # In-memory storage _storage = dict() _temp = dict() _table_schema = { 'main': ('value',), } def start(): global _pager _pager = Pager(DBFILE) def get_row(txn_id, table, row_id, page_num): raw_data = _storage.get(row_id, None) updated_value = _temp[txn_id].get(row_id, None) if updated_value: raw_data = updated_value if raw_data: schema = _table_schema.get(table) data = deserialize(schema, raw_data) else: data = None return {row_id: data} def write_row(txn_id, table, row_id, data, page_num): schema = _table_schema.get(table) raw_data = serialize(schema, data) _temp[txn_id][row_id] = raw_data return page_num def sync(page_num): _pager.flush_page(page_num) return page_num def stop(): if _pager: return _pager.db_close()
UTF-8
Python
false
false
1,062
py
5
__init__.py
5
0.621469
0.621469
0
59
17
60
mirumee/saleor
11,802,570,135,609
acd6fec63c7f9692aecbed15e26af690f8aceb23
4d2ecebad4a5144a70ad8e565215b153ef344452
/saleor/plugins/webhook/tests/test_tasks.py
9705ea5eef80a8eff0c0c767eb3bf8713104b35e
[ "BSD-3-Clause" ]
permissive
https://github.com/mirumee/saleor
6f28ded7e2eb354e9442d7b9c26f6c4c1c3a719f
1dbde699aaf0a2a7b80b968a76a079f16757c0f6
refs/heads/main
2023-09-03T02:12:00.604910
2023-03-29T07:05:15
2023-03-29T07:05:15
8,162,715
15,337
5,864
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
from decimal import Decimal
from unittest import mock

import pytest
from django.utils import timezone
from freezegun import freeze_time
from graphene import Node

from ....core import EventDeliveryStatus
from ....core.models import EventDelivery, EventPayload
from ....payment import TransactionEventType
from ....payment.interface import TransactionActionData
from ....payment.models import TransactionEvent
from ....payment.transaction_item_calculations import recalculate_transaction_amounts
from ....tests.utils import flush_post_commit_hooks
from ....webhook.event_types import WebhookEventSyncType
from ....webhook.payloads import generate_transaction_action_request_payload
from ..tasks import handle_transaction_request_task, trigger_transaction_request


@pytest.fixture
def mocked_webhook_response():
    mocked_post_response = mock.Mock()
    mocked_post_response.text = json.dumps({"pspReference": "123"})
    mocked_post_response.headers = []
    mocked_post_response.status_code = 200
    mocked_post_response.ok = True
    mocked_post_response.content = json.dumps({"pspReference": "123"})
    mocked_post_response.elapsed.total_seconds = lambda: 1  # noqa: E731
    return mocked_post_response


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.handle_transaction_request_task.delay")
def test_trigger_transaction_request(
    mocked_task,
    transaction_item_created_by_app,
    staff_user,
    permission_manage_payments,
    app,
):
    # given
    event = transaction_item_created_by_app.events.create(
        type=TransactionEventType.REFUND_REQUEST
    )
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook", is_active=True, target_url="http://localhost:3000/"
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction_item_created_by_app,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    # when
    trigger_transaction_request(
        transaction_data, WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED, staff_user
    )

    # then
    flush_post_commit_hooks()
    generated_payload = EventPayload.objects.first()
    generated_delivery = EventDelivery.objects.first()

    assert generated_payload.payload == generate_transaction_action_request_payload(
        transaction_data, staff_user
    )
    assert generated_delivery.status == EventDeliveryStatus.PENDING
    assert (
        generated_delivery.event_type
        == WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED
    )
    assert generated_delivery.webhook == webhook
    assert generated_delivery.payload == generated_payload

    mocked_task.assert_called_once_with(generated_delivery.id, app.name, event.id)


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.handle_transaction_request_task.delay")
def test_trigger_transaction_request_with_webhook_subscription(
    mocked_task,
    transaction_item_created_by_app,
    staff_user,
    permission_manage_payments,
    app,
):
    # given
    subscription = """
    subscription{
      event{
        ...on TransactionRefundRequested{
          transaction{
            id
          }
          action{
            amount
            actionType
          }
        }
      }
    }
    """
    event = transaction_item_created_by_app.events.create(
        type=TransactionEventType.REFUND_REQUEST
    )
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url="http://localhost:3000/",
        subscription_query=subscription,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction_item_created_by_app,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    # when
    trigger_transaction_request(
        transaction_data, WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED, staff_user
    )

    # then
    flush_post_commit_hooks()
    generated_payload = EventPayload.objects.first()
    generated_delivery = EventDelivery.objects.first()

    assert generated_payload
    assert generated_delivery
    assert json.loads(generated_payload.payload) == {
        "transaction": {
            "id": Node.to_global_id("TransactionItem", transaction_data.transaction.id),
        },
        "action": {"amount": 10.0, "actionType": "REFUND"},
    }
    assert generated_delivery.status == EventDeliveryStatus.PENDING
    assert (
        generated_delivery.event_type
        == WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED
    )
    assert generated_delivery.webhook == webhook
    assert generated_delivery.payload == generated_payload

    mocked_task.assert_called_once_with(generated_delivery.id, app.name, event.id)


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_only_psp_reference(
    mocked_post_request,
    transaction_item_generator,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    transaction = transaction_item_generator()
    expected_psp_reference = "psp:ref:123"
    mocked_webhook_response.text = json.dumps({"pspReference": expected_psp_reference})
    mocked_webhook_response.content = json.dumps(
        {"pspReference": expected_psp_reference}
    )
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    event = transaction.events.create(type=TransactionEventType.REFUND_REQUEST)
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert TransactionEvent.objects.count() == 1
    event.refresh_from_db()
    assert event.psp_reference == expected_psp_reference
    mocked_post_request.assert_called_once_with(
        target_url, data=payload.encode("utf-8"), headers=mock.ANY, timeout=mock.ANY
    )


@pytest.mark.parametrize("status_code", [500, 501, 510])
@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.handle_webhook_retry")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_server_error(
    mocked_post_request,
    mocked_webhook_retry,
    status_code,
    transaction_item_created_by_app,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    mocked_webhook_response.status_code = status_code
    mocked_webhook_response.text = ""
    mocked_webhook_response.content = ""
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    event = transaction_item_created_by_app.events.create(
        type=TransactionEventType.CHARGE_REQUEST
    )
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction_item_created_by_app,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert mocked_webhook_retry.called


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_missing_psp_reference(
    mocked_post_request,
    transaction_item_created_by_app,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    mocked_webhook_response.text = "{}"
    mocked_webhook_response.content = "{}"
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    event = transaction_item_created_by_app.events.create(
        type=TransactionEventType.REFUND_REQUEST
    )
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction_item_created_by_app,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_FAILURE
        ).count()
        == 1
    )
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_REQUEST
        ).count()
        == 1
    )
    failure_event = TransactionEvent.objects.filter(
        type=TransactionEventType.REFUND_FAILURE
    ).first()
    event.refresh_from_db()
    assert event.psp_reference is None
    assert failure_event.amount_value == event.amount_value
    assert failure_event.transaction_id == event.transaction_id

    mocked_post_request.assert_called_once_with(
        target_url, data=payload.encode("utf-8"), headers=mock.ANY, timeout=mock.ANY
    )


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_missing_required_event_field(
    mocked_post_request,
    transaction_item_created_by_app,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    expected_psp_reference = "psp:123:111"
    mocked_webhook_response.text = json.dumps(
        {"pspReference": expected_psp_reference, "amount": 12.00}
    )
    mocked_webhook_response.content = json.dumps(
        {"pspReference": expected_psp_reference, "amount": 12.00}
    )
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    event = transaction_item_created_by_app.events.create(
        type=TransactionEventType.REFUND_REQUEST
    )
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction_item_created_by_app,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_FAILURE
        ).count()
        == 1
    )
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_REQUEST
        ).count()
        == 1
    )
    failure_event = TransactionEvent.objects.filter(
        type=TransactionEventType.REFUND_FAILURE
    ).first()
    event.refresh_from_db()
    assert event.psp_reference is None
    assert failure_event.amount_value == event.amount_value
    assert failure_event.transaction_id == event.transaction_id

    mocked_post_request.assert_called_once_with(
        target_url, data=payload.encode("utf-8"), headers=mock.ANY, timeout=mock.ANY
    )


@freeze_time("2022-06-11 12:50")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_result_event(
    mocked_post_request,
    transaction_item_generator,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    transaction = transaction_item_generator()
    request_psp_reference = "psp:123:111"
    event_amount = 12.00
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"

    response_payload = {
        "pspReference": request_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }
    mocked_webhook_response.text = json.dumps(response_payload)
    mocked_webhook_response.content = json.dumps(response_payload)
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    request_event = transaction.events.create(type=TransactionEventType.CHARGE_REQUEST)
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=request_event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert TransactionEvent.objects.all().count() == 2
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.CHARGE_REQUEST
        ).count()
        == 1
    )
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.CHARGE_SUCCESS
        ).count()
        == 1
    )
    success_event = TransactionEvent.objects.filter(
        type=TransactionEventType.CHARGE_SUCCESS
    ).first()
    assert success_event
    request_event.refresh_from_db()
    assert request_event.psp_reference == request_psp_reference
    assert success_event.psp_reference == request_psp_reference
    assert success_event.amount_value == event_amount
    assert success_event.created_at.isoformat() == event_time
    assert success_event.external_url == event_url
    assert success_event.message == event_cause

    mocked_post_request.assert_called_once_with(
        target_url, data=payload.encode("utf-8"), headers=mock.ANY, timeout=mock.ANY
    )


@freeze_time("2022-06-11T17:50:00+00:00")
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_with_only_required_fields_for_result_event(
    mocked_post_request,
    transaction_item_generator,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    transaction = transaction_item_generator()
    request_psp_reference = "psp:123:111"
    request_event = transaction.events.create(type=TransactionEventType.REFUND_REQUEST)
    response_payload = {
        "pspReference": request_psp_reference,
        "result": TransactionEventType.REFUND_SUCCESS.upper(),
        "amount": str(request_event.amount_value),
    }
    mocked_webhook_response.text = json.dumps(response_payload)
    mocked_webhook_response.content = json.dumps(response_payload)
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction,
        action_type="refund",
        action_value=Decimal("10.00"),
        event=request_event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_REFUND_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    assert TransactionEvent.objects.all().count() == 2
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_REQUEST
        ).count()
        == 1
    )
    assert (
        TransactionEvent.objects.filter(
            type=TransactionEventType.REFUND_SUCCESS
        ).count()
        == 1
    )
    success_event = TransactionEvent.objects.filter(
        type=TransactionEventType.REFUND_SUCCESS
    ).first()
    request_event.refresh_from_db()
    assert success_event
    assert request_event.psp_reference == request_psp_reference
    assert success_event.type == TransactionEventType.REFUND_SUCCESS
    assert success_event.psp_reference == request_psp_reference
    assert success_event.amount_value == request_event.amount_value
    assert success_event.created_at == timezone.now()
    assert success_event.external_url == ""
    assert success_event.message == ""

    mocked_post_request.assert_called_once_with(
        target_url, data=payload.encode("utf-8"), headers=mock.ANY, timeout=mock.ANY
    )


@freeze_time("2022-06-11 12:50")
@mock.patch(
    "saleor.payment.utils.recalculate_transaction_amounts",
    wraps=recalculate_transaction_amounts,
)
@mock.patch("saleor.plugins.webhook.tasks.requests.post")
def test_handle_transaction_request_task_calls_recalculation_of_amounts(
    mocked_post_request,
    mocked_recalculation,
    transaction_item_generator,
    permission_manage_payments,
    staff_user,
    mocked_webhook_response,
    app,
):
    # given
    transaction = transaction_item_generator()
    request_psp_reference = "psp:123:111"
    event_amount = 12.00
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"

    response_payload = {
        "pspReference": request_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }
    mocked_webhook_response.text = json.dumps(response_payload)
    mocked_webhook_response.content = json.dumps(response_payload)
    mocked_post_request.return_value = mocked_webhook_response
    target_url = "http://localhost:3000/"

    request_event = transaction.events.create(type=TransactionEventType.CHARGE_REQUEST)
    app.permissions.set([permission_manage_payments])
    webhook = app.webhooks.create(
        name="webhook",
        is_active=True,
        target_url=target_url,
    )
    webhook.events.create(event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED)
    transaction_data = TransactionActionData(
        transaction=transaction,
        action_type="charge",
        action_value=Decimal("12.00"),
        event=request_event,
        transaction_app_owner=app,
    )

    payload = generate_transaction_action_request_payload(transaction_data, staff_user)
    event_payload = EventPayload.objects.create(payload=payload)
    delivery = EventDelivery.objects.create(
        status=EventDeliveryStatus.PENDING,
        event_type=WebhookEventSyncType.TRANSACTION_CHARGE_REQUESTED,
        payload=event_payload,
        webhook=webhook,
    )

    # when
    handle_transaction_request_task(delivery.id, app.name, transaction_data.event.id)

    # then
    mocked_recalculation.assert_called_once_with(transaction)
    transaction.refresh_from_db()
    assert transaction.charged_value == event_amount
UTF-8
Python
false
false
22,515
py
67
test_tasks.py
61
0.688696
0.673951
0
686
31.8207
88
darkreapyre/NeOS-0.0.0-00
6,871,947,703,361
8b01738e8bcaacf562e80d38955ea27024e968be
4fe0c90217656ff9f449c1278a9956d71e5769b0
/core/clients/python/api_bindings/cb2_api/objects/node_role.py
172f5911d9e9f64ce1184c058587447f2ca868f0
[ "Apache-2.0" ]
permissive
https://github.com/darkreapyre/NeOS-0.0.0-00
f7335935a058700cc7a3aff20fba203fa1d3d727
fdaddf3350edac74ee7c31f34843f0296d8e1d84
refs/heads/master
2016-09-06T20:34:18.263522
2014-10-12T00:54:17
2014-10-12T00:54:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright 2014, Dell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cb2_api.endpoint import EndPoint
from apiobject import ApiObject


class Node_RoleEP(EndPoint):
    '''
    https://github.com/opencrowbar/core/blob/master/doc/devguide/api/node_roles.md
    '''
    __endpoint = "/api/v2/node_roles"
    __apiObjectType = "Node_Role"

    def __init__(self, session):
        self.session = session
        self.endpoint = Node_RoleEP.__endpoint
        super(Node_RoleEP, self).__init__(session)


class Node_Role(ApiObject):
    '''
    Node_role object
    '''
    def __init__(self, json={}):
        self.available = None
        self.cohort = None
        self.updated_at = None
        self.created_at = None
        self.runlog = None
        self.order = None
        self.state = None
        self.node_id = None
        self.status = None
        self.run_count = None
        self.deployment_id = None
        self.role_id = None
        self.id = None
        self.__dict__ = json
        super(Node_Role, self).__init__()


class Enums_Role():
    '''
    TODO
    '''
UTF-8
Python
false
false
1,648
py
490
node_role.py
58
0.615291
0.609223
0
56
27.964286
82
Walkerlikesfish/leetleet
523,986,042,418
0694183dbfaf47e38e96cce2a060beae85557bcf
3f39fd28584ede3e606438892eaa4e61e826f6c3
/leetPy/leet_131.py
aa6c34437c6028d41a23586f4dce5b1b0db4b585
[]
no_license
https://github.com/Walkerlikesfish/leetleet
d3d4e1a2554b0f1967f58819fde1cc1713d6df9a
477511951ce9a13ff87e1901559d999b7a9b38c7
refs/heads/master
2020-03-22T22:57:19.207535
2019-05-19T10:17:03
2019-05-19T10:17:03
140,780,989
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json


class Solution(object):
    def partition(self, s):
        """
        :type s: str
        :rtype: List[List[str]]
        """
        self.n = len(s)
        self.res = []
        self.s = s
        if not s:
            return []
        if len(s) == 1:
            return [[s]]
        self.dfs(1, [0])
        return self.res

    def is_hw(self, s):
        # print s
        i = 0
        n = len(s)
        while i < n/2:
            if s[i] != s[n-1-i]:
                return False
            i += 1
        return True

    def dfs(self, cn, cur_set):
        if cn == self.n:
            if len(cur_set) > 1 and not self.is_hw(self.s[cur_set[-2]:cur_set[-1]]):
                return
            cur_set.append(cn)
            if not self.is_hw(self.s[cur_set[-2]:cur_set[-1]]):
                return
            else:
                cur_sp = []
                for i in range(1, len(cur_set)):
                    sub_str = self.s[cur_set[i-1]:cur_set[i]]
                    cur_sp.append(sub_str)
                self.res.append(cur_sp)
        else:
            if len(cur_set) > 1:
                if not self.is_hw(self.s[cur_set[-2]:cur_set[-1]]):
                    return
            new_set = list(cur_set)
            new_set.append(cn)
            self.dfs(cn+1, cur_set)
            self.dfs(cn+1, new_set)


def stringToString(input):
    return input[1:-1].decode('string_escape')


def string2dArrayToString(input):
    return json.dumps(input)


def main():
    import sys
    while True:
        try:
            line = sys.stdin.readline().rstrip('\n')
            s = stringToString(line)
            ret = Solution().partition(s)
            out = string2dArrayToString(ret)
            print out
        except StopIteration:
            break


if __name__ == '__main__':
    main()
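# NOTE (illustrative, not from the original file): the DFS above tracks cut
# points as indices in cur_set, exploring the "no cut" branch before the
# "cut here" branch, so for "aab" the two valid palindrome partitions
# come back in this order:
#
#     Solution().partition("aab")   # -> [['aa', 'b'], ['a', 'a', 'b']]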
UTF-8
Python
false
false
1,834
py
182
leet_131.py
181
0.442203
0.429662
0
79
22.227848
82
quynhdtn/WECDenoiser
19,172,734,044,574
4737faf2209cc38efeb777b3ca78a144383d0222
30752189f5bb42581b4fcb3ee9721903427ee767
/nn/Connection.py
c529180d7e649beb9925a21c6f5a4ece0ba1c6de
[]
no_license
https://github.com/quynhdtn/WECDenoiser
7511c87af3a226beee1bd70165845bb343e94f22
bd57d8a2408926562961f23a15bb4c604a375a73
refs/heads/master
2021-01-10T04:50:26.145725
2016-03-04T09:34:58
2016-03-04T09:34:58
53,056,487
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from nn.Functions import DotTransferFunction, SigmoidActivateFunction, WeightInit
import theano.tensor as T
import theano as th
import numpy as np

__author__ = 'quynhdo'


# implement the connection between two layers
class Connection:
    def __init__(self, scr, dst, transfer_func=DotTransferFunction, activate_func=SigmoidActivateFunction,
                 use_bias=True, idx="", initial_w=None, initial_b=None, w_lamda=1.0, rng=None):
        '''
        :param scr: source layers; can be a single layer or a list of layers. If it is a list of layers,
                the units of the source layers are concatenated before being processed in the connection.
        :param dst: destination layer
        :param transfer_func: usually the Dot func to get the net input
        :param activate_func: activate function
        :param use_bias:
        :param idx: index of the connection
        :param initial_w: initial value for weights
        :param initial_b: initial value for bias
        :param w_lamda: initial value to init the weight
        :return:
        '''
        self.scr = scr  # source layer
        self.dst = dst  # destination layer
        self.activate_func = activate_func  # activate function
        self.transfer_func = transfer_func  # transfer function
        self.W = None
        self.b = None

        if isinstance(self.scr, list):  # if there is more than one src layer
            # then the input size of the connection equals the sum over all scr layers
            self.size_in = np.sum([l.size for l in self.scr])
        else:
            self.size_in = self.scr.size
        self.size_out = self.dst.size

        if initial_w is not None:
            self.W = initial_w
        else:
            self.W = th.shared(value=np.asarray(WeightInit(self.size_in, self.size_out, w_lamda, rng),
                                                dtype=th.config.floatX),
                               name='W' + idx, borrow=True)
        self.params = [self.W]

        if use_bias:
            if initial_b is not None:
                self.b = initial_b
            else:
                self.b = th.shared(value=np.zeros(dst.size, dtype=th.config.floatX), name="b" + idx, borrow=True)
            self.params.append(self.b)

    # Start the connection, calculate the unit values of the dst layer from the scr layer
    def start(self):
        xx = None
        start_sparse_idx = -1
        if isinstance(self.scr, list):
            # we only allow sparse layers to occur at the end...
            for i in range(len(self.scr)):
                if th.sparse.basic._is_sparse_variable(self.scr[i].units):
                    start_sparse_idx = i
                    break
            if start_sparse_idx > 0:
                xx = T.concatenate([self.scr[i].units for i in range(start_sparse_idx)], axis=1)
                xx = th.sparse.hstack((th.sparse.csr_from_dense(xx), self.scr[start_sparse_idx].units))
                for j in range(start_sparse_idx + 1, len(self.scr)):
                    xx = th.sparse.hstack((xx, self.scr[j].units))
            if start_sparse_idx == 0:
                xx = self.scr[0].units
                for j in range(1, len(self.scr)):
                    xx = th.sparse.hstack((xx, self.scr[j].units))
            if start_sparse_idx < 0:
                xx = T.concatenate([self.scr[i].units for i in range(len(self.scr))], axis=1)
        else:
            xx = self.scr.units

        self.dst.units = self.activate_func(self.transfer_func(xx, self.W, self.b))

    def getOutput(self, x):
        '''
        only works when x is a single variable, not a list
        :param x:
        :return:
        '''
        return self.activate_func(self.transfer_func(x, self.W, self.b))
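# NOTE (illustrative, not from the original file): a hypothetical wiring
# sketch. `hidden_layer` and `output_layer` stand in for whatever layer class
# this project defines elsewhere (it only needs `.size` and `.units`); with
# the defaults, the forward pass is dst.units = sigmoid(dot(x, W) + b).
#
#     conn = Connection(scr=hidden_layer, dst=output_layer, idx="1")
#     conn.start()                 # populates output_layer.units symbolically
#     y = conn.getOutput(x_batch)  # same weights applied to an explicit input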
UTF-8
Python
false
false
3,685
py
9
Connection.py
7
0.584532
0.581547
0
92
39.054348
136
fandipj98/Social-Media-Prediction
17,154,099,393,799
8114d2702f2c3f40443429410c1aeb0204b34e12
16404ea73768335255abe89adea850176255a3ef
/kmeans.py
393fb457aed4152c7970266f36b73667ddee4922
[]
no_license
https://github.com/fandipj98/Social-Media-Prediction
7daf5a13d73493b6c1cb4ad4eff370eee203bca4
ffa9fc1b781cdc9d55ca118351fe05b123b2a162
refs/heads/master
2022-03-27T17:17:56.418339
2019-12-11T14:14:58
2019-12-11T14:14:58
215,304,469
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import KMeans
import csv

trainingSet = []
with open('data_km.csv', 'r') as csvfile:
    lines = csv.reader(csvfile)
    dataset = list(lines)
    for x in range(10000):
        trainingSet.append(dataset[x])

with open('data_tugas_km.csv', 'r') as csvfileTest:
    linesTest = csv.reader(csvfileTest)
    datasetTest = list(linesTest)

# def predict(data, centroids):
#     distances = []
#     for unit in data:
#         for center in centroids:
#             distances.append(np.sum((unit - center) ** 2))
#     distances = np.reshape(distances, data.shape)
#     closest_centroid = [np.argmin(dist) for dist in distances]
#     print(closest_centroid)

trainData = np.array(trainingSet)
# print(trainData)
categoryGet = trainData[:,0].astype(float)
# print(categoryGet)
subcategoryGet = trainData[:,1].astype(float)
# print(subcategoryGet)
labelGet = trainData[:,2].astype(float)
# print(labelGet)
cscGet = trainData[:,0:2].astype(float)

testData = np.array(datasetTest)
# print(testData)
testDataGet = testData[:,0:2].astype(float)
# print(testDataGet)

# plt.scatter(subcategoryGet, labelGet)
# plt.show()

kmeans = KMeans(n_clusters=3).fit(cscGet)
centroid = kmeans.cluster_centers_
print(centroid)

# TRAIN DATA
# plt.scatter(cscGet[:,0], cscGet[:,1])
# plt.scatter(centroid[:,0], centroid[:,1], marker='*', c='g', s=150)
# plt.show()

# predict(testDataGet, centroid)
test = kmeans.predict(testDataGet)
print(test)

closest_centroid = []
for x in range(len(testDataGet)):
    diff = centroid - testDataGet[x,:]
    # print(diff)
    dist = np.sqrt(np.sum(diff**2, axis=-1))
    # print(dist)
    closest_centroid.append(centroid[np.argmin(dist),])
    # print(centroid)

for x in range(len(closest_centroid)):
    print(closest_centroid[x])

plt.scatter(testDataGet[:,0], testDataGet[:,1])
plt.scatter(centroid[:,0], centroid[:,1], marker='*', c='g', s=150)
plt.show()

# Find the centers (elbow method)
# wcss = []
# for i in range(1, 11):
#     kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=1000, n_init=10, random_state=0)
#     kmeans.fit(cscGet)
#     wcss.append(kmeans.inertia_)
# plt.plot(range(1, 11), wcss)
# plt.title('Elbow Method')
# plt.xlabel('Number of clusters')
# plt.ylabel('WCSS')
# plt.show()

# kmeans = KMeans(n_clusters=4, init='k-means++', max_iter=300, n_init=10, random_state=0)
# pred_y = kmeans.fit_predict(categoryGet)
# print(pred_y)
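# NOTE (illustrative, not from the original file): the manual loop above
# recomputes what kmeans.predict() already returned -- for each test point,
# argmin over Euclidean distances picks the same centroid index, so
# closest_centroid[x] equals centroid[test[x]]. A vectorised equivalent,
# using the same variables:
#
#     closest = centroid[np.argmin(
#         np.linalg.norm(testDataGet[:, None, :] - centroid[None, :, :], axis=2),
#         axis=1)]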
UTF-8
Python
false
false
2,546
py
41
kmeans.py
17
0.67282
0.653181
0
93
26.365591
95
raczandras/szkriptnyelvek
5,282,809,776,829
a65a9dad17587e5ca8d7d411f36b675be1d08359
2384ecfba2f96b44aad463360444389b1c517263
/HF/8/hangrendenum.py
8fc40851777952d835ac1570e41811f456280252
[]
no_license
https://github.com/raczandras/szkriptnyelvek
9176f511ec3552385176dd122459633d41d0db8f
efe3dec3288f71e18c48150a9c849ef34fffb766
refs/heads/master
2023-01-11T02:29:03.023309
2020-11-12T14:33:29
2020-11-12T14:33:29
296,817,986
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3

from enum import Enum


class Beturendek(Enum):
    MELYHANGOK = 'aáoóuú'
    MAGASHANGOK = 'eéiíöőüű'


class Hangrendek(Enum):
    SEMMILYEN = 0
    MÉLY = 1
    MAGAS = 2
    VEGYES = 3


def hangrend(word):
    mely = False
    magas = False
    eredmeny = 0
    for c in word:
        if c in Beturendek.MELYHANGOK.value and not bool(mely):
            mely = True
            eredmeny += 1
        elif c in Beturendek.MAGASHANGOK.value and not bool(magas):
            magas = True
            eredmeny += 2
    return eredmeny


def main():
    words = ["ablak", "erkély", "kisvasút", "magas", "mély", "zrt"]
    for w in words:
        print(w + " -> " + Hangrendek(hangrend(w)).name)


if __name__ == "__main__":
    main()
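# NOTE (illustrative, not from the original file): hangrend() encodes Hungarian
# vowel harmony as a 2-bit sum (first deep vowel -> +1, first front vowel
# -> +2), so the Hangrendek lookup for the sample words above prints:
#
#     ablak -> MÉLY, erkély -> MAGAS, kisvasút -> VEGYES,
#     magas -> MÉLY, mély -> MAGAS, zrt -> SEMMILYEN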
UTF-8
Python
false
false
771
py
64
hangrendenum.py
62
0.562005
0.551451
0
39
18.435897
67
rivrproject/rivr
18,511,309,090,514
b39c45082fdf514cf7a806179ccca4128d281b56
5f38be02781b0ee6bd3bf09ad4bf77c3b525e212
/rivr/router.py
aaef833ea63c49f9a103e797a0b7d8a22d737f27
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause" ]
permissive
https://github.com/rivrproject/rivr
a474c7bf24a8a0076fb75129ee9a657421532ac8
b4f7eb481cc28ae48169f1d3982b896b7cfd5c91
refs/heads/master
2021-07-03T01:51:48.676645
2020-11-05T18:26:03
2020-11-05T18:26:03
506,574
3
0
NOASSERTION
false
2020-10-10T15:31:51
2010-02-07T13:49:53
2020-10-10T13:51:53
2020-10-10T13:52:08
253
10
1
7
Python
false
false
import re
from typing import Any, Callable, Dict, Iterable, Optional, Tuple

from rivr.http import Http404, Request, Response, ResponsePermanentRedirect
from rivr.importlib import import_module


class Resolver404(Http404):
    pass


class ViewDoesNotExist(Exception):
    pass


class RegexURL(object):
    def __init__(self, regex: str):
        self.regex = re.compile(regex, re.UNICODE)

    def resolve(
        self, path: str
    ) -> Optional[Tuple[Callable, Iterable[Any], Dict[str, Any]]]:
        match = self.regex.search(path)
        if match:
            return self.match_found(path, match)

        return None

    def match_found(
        self, path: str, match
    ) -> Optional[Tuple[Callable, Iterable[Any], Dict[str, Any]]]:
        raise NotImplementedError


class RegexURLPattern(RegexURL):
    def __init__(
        self,
        regex: str,
        callback: Callable[..., Response],
        kwargs={},
        name: Optional[str] = None,
        prefix: Optional[str] = None,
    ):
        super(RegexURLPattern, self).__init__(regex)

        if callable(callback):
            self._callback = callback
        else:
            self._callback = None
            self._callback_str = callback

        self.default_kwargs = kwargs
        self.name = name
        self.add_prefix(prefix)

    def match_found(
        self, path: str, match
    ) -> Optional[Tuple[Callable, Iterable[Any], Dict[str, Any]]]:
        kwargs = match.groupdict()

        if kwargs:
            args: Iterable = tuple()
        else:
            args = match.groups()

        kwargs.update(self.default_kwargs)

        return self.callback, args, kwargs

    @property
    def callback(self):
        if self._callback is not None:
            return self._callback

        try:
            self._callback = import_module(self._callback_str)
        except ImportError as e:
            raise ViewDoesNotExist(
                'Could not import %s (%s)' % (self._callback_str, str(e))
            )

        return self._callback

    def add_prefix(self, prefix):
        if prefix and hasattr(self, '_callback_str'):
            self._callback_str = prefix + '.' + self._callback_str


class RegexURLResolver(RegexURL):
    def __init__(self, regex, router, kwargs={}):
        super(RegexURLResolver, self).__init__(regex)

        if callable(router):
            self._router = router
        else:
            self._router = None
            self._router_str = router

        self.default_kwargs = kwargs

    def match_found(
        self, path: str, match
    ) -> Optional[Tuple[Callable, Iterable[Any], Dict[str, Any]]]:
        new_path = path[match.end():]

        try:
            callback, args, kwargs = self.router.resolve(new_path)
        except Http404:
            return None

        kwargs.update(self.default_kwargs)

        return callback, args, kwargs

    @property
    def router(self):
        if self._router is not None:
            return self._router

        try:
            self._router = import_module(self._router_str)
        except ImportError as e:
            raise Http404(
                'Could not import %s. Error was: %s' % (self._router_str, str(e))
            )

        return self._router


class BaseRouter(object):
    def __init__(self, *urls):
        """
        Router takes URLs which you can register on creation.

        Example::

            router = rivr.Router(
                (r'^$', index),
                (r'^test/$', test),
            )
        """
        self.urlpatterns = []

        try:
            if isinstance(urls[0], str):
                prefix = urls[0]
                urls = urls[1:]
            else:
                prefix = None
        except IndexError:
            prefix = None

        for t in urls:
            if isinstance(t, (list, tuple)):
                t = url(*t)

            if prefix and hasattr(t, 'add_prefix'):
                t.add_prefix(prefix)

            self.urlpatterns.append(t)

    def __iadd__(self, router):
        self.urlpatterns.__iadd__(router.urlpatterns)

    def __isub__(self, router):
        self.urlpatterns.__isub__(router.urlpatterns)

    def append(self, url):
        self.urlpatterns.append(url)

    def register(self, *t):
        """
        Register a URL pattern with a view. This can either be used as a
        decorator, or it can be used as a method with a view.

        Decorator Example::

            @router.register(r'^$')
            def view(request):
                return Response()

        View Example::

            router.register(r'^$', view)
        """
        if isinstance(t, (list, tuple)):
            if len(t) == 1:
                def func(view):
                    self.register(t[0], view)
                    return view

                return func

        t = url(*t)
        self.append(t)


class Router(BaseRouter):
    append_slash = True
    """
    When append_slash is True and the request URL does not match any pattern
    in the router and does not end in a slash, the router will HTTP redirect
    the request to the same URL with a slash appended.
    """

    def resolve(self, path: str) -> Tuple[Callable, Iterable[Any], Dict[str, Any]]:
        for pattern in self.urlpatterns:
            result = pattern.resolve(path)
            if result is not None:
                return result

        raise Resolver404('No URL pattern matched.')

    def __call__(self, request: Request) -> Response:
        if self.append_slash and (not request.path.endswith('/')):
            if (not self.is_valid_path(request.path)) and self.is_valid_path(
                request.path + '/'
            ):
                return ResponsePermanentRedirect(request.path + '/')

        result = RegexURLResolver(r'^/', self).resolve(request.path)

        if result:
            callback, args, kwargs = result
            return callback(request, *args, **kwargs)

        raise Resolver404('No URL pattern matched.')

    def is_valid_path(self, path: str) -> bool:
        try:
            self.resolve(path)
            return True
        except Resolver404:
            return False


class Domain(BaseRouter):
    APPEND_SLASH = True

    def resolve(self, path: str) -> Tuple[Callable, Iterable[Any], Dict[str, Any]]:
        for pattern in self.urlpatterns:
            result = pattern.resolve(path)
            if result is not None:
                return result

        raise Resolver404('No URL pattern matched.')

    def __call__(self, request: Request) -> Response:
        host = ':'.join(request.host.split(':')[:-1])
        url = host + request.path

        if self.APPEND_SLASH and (not url.endswith('/')):
            if (not self.is_valid_url(url)) and self.is_valid_url(url + '/'):
                return ResponsePermanentRedirect(url + '/')

        result = self.resolve(url)

        if result:
            callback, args, kwargs = result
            return callback(request, *args, **kwargs)

        raise Resolver404('No URL pattern matched.')

    def is_valid_url(self, path: str) -> bool:
        try:
            self.resolve(path)
            return True
        except Resolver404:
            return False


# Shortcuts


def url(regex, view: Callable[..., Response], kwargs={}, name=None, prefix=None):
    if isinstance(view, list):
        return RegexURLResolver(regex, view[0], kwargs)

    as_view = getattr(view, 'as_view', None)
    if as_view and callable(as_view):
        view = as_view()

    return RegexURLPattern(regex, view, kwargs, name, prefix)


def include(router):
    return [router]
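# NOTE (illustrative, not from the original file): a minimal end-to-end sketch
# of the resolve/dispatch path above, assuming rivr's Request/Response objects
# are constructed elsewhere (e.g. by rivr's WSGI handler).
#
#     router = Router()
#
#     @router.register(r'^hello/(?P<name>\w+)/$')
#     def hello(request, name):
#         return Response('Hello %s' % name)
#
#     router.resolve('hello/world/')  # -> (hello, (), {'name': 'world'})
#
# Note that Router.__call__ wraps itself in RegexURLResolver(r'^/'), which
# strips the leading slash before resolve() sees the path.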
UTF-8
Python
false
false
7,652
py
47
router.py
34
0.553973
0.548745
0
282
26.134752
83
XRSHEERAN/Algorithms
8,650,064,181,024
285b475c08e748f2aa69671eb6f98ee85622c3d4
781c65db3fadb9cb9511468dc0c17e9d985297f4
/TopInterview/4SumII.py.py
76ad7414d0a84c7a66c2081dded74ac8d8dc4478
[]
no_license
https://github.com/XRSHEERAN/Algorithms
37b38a8e80c6e1b28fca5b386f654f4a3851544c
da60a684655e9c890bbf8ee0c234fc2432ad9ec1
refs/heads/master
2018-11-02T04:52:36.434798
2018-10-04T18:26:14
2018-10-04T18:26:14
113,063,020
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Solution:
    def fourSumCount(self, A, B, C, D):
        """
        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int
        """
        mapping = {}
        for i in A:
            for j in B:
                temp = i + j
                if temp not in mapping:
                    mapping[temp] = 1
                else:
                    mapping[temp] += 1

        mapping2 = {}
        for i in C:
            for j in D:
                temp = i + j
                if temp not in mapping2:
                    mapping2[temp] = 1
                else:
                    mapping2[temp] += 1

        count = 0
        for i in mapping:
            if -1*i in mapping2:
                count += mapping[i]*mapping2[-1*i]
        return count
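# NOTE (illustrative, not from the original file): the two-dict
# meet-in-the-middle above counts pair sums of A+B and C+D, giving O(n^2)
# time instead of the naive O(n^4). The classic LeetCode 454 sample is
# verifiable by hand:
#
#     Solution().fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2])   # -> 2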
UTF-8
Python
false
false
812
py
51
4SumII.py.py
46
0.380542
0.364532
0
31
25.193548
48
tiduswr/Algoritimos_P1_UEPB_CCEA_CAMPUS_VII
17,617,955,862,707
e8253915fe86f37f442bafd2fc6f8096964b1e79
8394f555a2cba1e90b2cbdd85ced0346487be8ac
/Lista 06/lista06ex08.py
1829c10b8e8e552e9b710cf1205057c943a5d7b6
[]
no_license
https://github.com/tiduswr/Algoritimos_P1_UEPB_CCEA_CAMPUS_VII
f44ad786db73aa9009dd6196e364ee363cabb692
8b74b0782a73cea32b5382a2f893615afdf16017
refs/heads/master
2022-03-10T18:22:44.697323
2019-11-21T05:09:18
2019-11-21T05:09:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
lin = 3
col = 6
matriz = []
somaimpar = 0
media2_4 = 0

print('=' * 50)
print('{:^50}'.format('Questão 08 Lista 06!'))
print('=' * 50)

for i in range(lin):
    matriz.append([0] * col)

for i in range(lin):
    for j in range(col):
        matriz[i][j] = float(input('Matriz[{}][{}]: '.format(i+1, j+1)))

for i in range(lin):
    for j in range(col):
        if ((j + 1) % 2) > 0:
            somaimpar = somaimpar + matriz[i][j]
        if (j + 1) == 2 or (j + 1) == 4:
            media2_4 = media2_4 + matriz[i][j]
        if (j + 1) == 6:
            matriz[i][j] = matriz[i][0] + matriz[i][1]

print('=' * 50)
print('{:^50}'.format('RESULTADOS'))
print('=' * 50)
print('\n* A soma de todos os elementos das colunas Impares\né igual a: {:.2f}'.format(somaimpar))
print('* A media de todos os elementos da Segunda e Quarta\ncoluna é igual a: {:.2f}\n'.format(media2_4/(lin*2)))
print('Matriz Modificada:\n')

for j in range(col):
    print('|', '{:^8}'.format('Coluna_'+str(j+1)), end=' ')
    if (j + 1) == col:
        print('|', end='')
print()

for i in range(lin):
    for j in range(col):
        print('|', '{:^8.2f}'.format(matriz[i][j]), end=' ')
        if (j + 1) == col:
            print('|', end='')
    print()
UTF-8
Python
false
false
1,242
py
122
lista06ex08.py
122
0.508475
0.467312
0
47
25.382979
113
personalbrian/self-playing-snake
7,713,761,265,469
209465d3b8fd72e9ba1a50b8679788330aab0065
4cc751e4dc2688af3709c9ed26075f627ea33944
/venv/Lib/site-packages/IPython/terminal/tests/test_interactivshell.py
5a00780cc6cefc9720119a8c3ab708a3f82fd1f4
[]
no_license
https://github.com/personalbrian/self-playing-snake
6a485328292d4971636cc4d033839ffc16d7f6ec
db4cf9b88b73c2494c35e599c2c23faaff7a3b73
refs/heads/main
2023-04-17T08:11:06.501719
2021-05-03T08:06:32
2021-05-03T08:06:32
362,734,451
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
version https://git-lfs.github.com/spec/v1
oid sha256:34ac6aa27eff0ff5eb776a5538fc557ff5ee671b2f53cf1c04680380340d80a2
size 6888
UTF-8
Python
false
false
129
py
3,892
test_interactivshell.py
3,212
0.883721
0.51938
0
3
42
75
gesco/TG_PY
16,346,645,547,972
39b3df6d2ca6edeb7e30b6fabe81d87dcd88ec3d
99a5d68411c02a78a0bb1c0a4e4d167116aca413
/02_InputProcessingAndOutput/program_0211.py
bb9b344692d31892e9456758692e29a1fd216ee9
[]
no_license
https://github.com/gesco/TG_PY
19c3fcada4d530ff065e71e12f281ee56dd08eb4
af80f15da27448603d1fececbf71b620ff3d77a5
refs/heads/main
2023-02-12T22:20:52.085426
2021-01-09T16:52:53
2021-01-09T16:52:53
323,755,566
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Create variables to reference two strings.
first_name = 'Kathryn'
last_name = 'Marino'

# Display the values referenced by the variables.
print(first_name, last_name)

# OUTPUT
# Kathryn Marino
UTF-8
Python
false
false
197
py
7
program_0211.py
7
0.751269
0.751269
0
9
20.888889
49
zengxio/auto
13,348,758,365,297
dbfd5fdc0ae3943dbfb76e94aa290cb2cb1aaa11
1da5738176c97cc931abbc8abb9fbe1953389dc5
/autoclient/Course_system/lib/Login.py
01936eb56d8f311f6eb6224b81fbf8c602da6681
[]
no_license
https://github.com/zengxio/auto
ed25db07c513e3e3c7cec3125e39c4429de4d52b
14f101afe97ac1257a7afc804a9aac75de3a0376
refs/heads/master
2020-03-12T21:25:15.301984
2018-04-24T09:16:15
2018-04-24T09:16:15
130,827,649
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
#coding:utf-8
from Course_system.lib import Open_file


class login():
    def __init__(self, username, password):
        self.username = username
        self.password = password

    def login_func(self):
        userdata = Open_file.Open_file_class()
        read_file = userdata.read()
        if self.username in read_file and read_file[self.username].get('password'):
            if self.password == read_file[self.username]['password']:
                if not read_file[self.username]['login']:
                    read_file[self.username]['login'] = True
                    print('login successfully')
                    return read_file[self.username]
            else:
                print('password error, please re-enter')
        else:
            print('user does not exist, please re-enter')


if __name__ == '__main__':
    l = login('zxafy', '123456')
    print(l.login_func())
UTF-8
Python
false
false
925
py
21
Login.py
21
0.571892
0.564324
0
31
28.870968
84
srio/paper-guigay-resources
12,893,491,858,132
2aedc8f56fb337227192ae45c2166de5042a0a1e
cd028ec879c4c1538aaee528d706817f5ca68412
/scripts/crystal_data.py
e830fabae8e463b3d0712b0ca584b9da0c9ab77e
[ "MIT" ]
permissive
https://github.com/srio/paper-guigay-resources
5d0b0f06618eb941d7a5c34079e2c854a334e9b9
4c7d379a975b5ae034c5595c6222d5ff94c39460
refs/heads/main
2023-03-29T23:44:29.167969
2021-04-09T08:32:44
2021-04-09T08:32:44
332,707,764
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#
# example of using xraylib to get crystal data
#
# see:
#   http://dx.doi.org/10.1016/j.sab.2011.09.011  (paper)
#   https://github.com/tschoonj/xraylib/  (code)
#   http://lvserver.ugent.be/xraylib-web  (web interface, but crystals not included!)
#

#
# import block
#
import xraylib
import numpy as np
import scipy.constants.codata


def get_crystal_data(crystal_id="Si", hkl=[1, 1, 1], photon_energy_in_keV=12.398, verbose=True):
    #
    # get crystal data for silicon crystal
    #
    cryst = xraylib.Crystal_GetCrystal(crystal_id)

    # print some info
    if verbose:
        print(" Unit cell dimensions [A] are %f %f %f" % (cryst['a'], cryst['b'], cryst['c']))
        print(" Unit cell angles are %f %f %f" % (cryst['alpha'], cryst['beta'], cryst['gamma']))
        print(" Unit cell volume [A] is %f" % (cryst['volume']))

    #
    # define miller indices and compute dSpacing
    #
    hh = hkl[0]
    kk = hkl[1]
    ll = hkl[2]
    debyeWaller = 1.0
    rel_angle = 1.0  # ratio of (incident angle)/(bragg angle) -> we work at Bragg angle
    dspacing = xraylib.Crystal_dSpacing(cryst, hh, kk, ll)
    if verbose:
        print("dspacing: %f A \n" % dspacing)

    #
    # define energy and get Bragg angle
    #
    ener = photon_energy_in_keV  # 12.398 # keV
    braggAngle = xraylib.Bragg_angle(cryst, ener, hh, kk, ll)
    if verbose:
        print("Bragg angle: %f degrees \n" % (braggAngle * 180 / np.pi))

    #
    # get the structure factor (at a given energy)
    #
    f0 = xraylib.Crystal_F_H_StructureFactor(cryst, ener, 0, 0, 0, debyeWaller, 1.0)
    fH = xraylib.Crystal_F_H_StructureFactor(cryst, ener, hh, kk, ll, debyeWaller, 1.0)
    if verbose:
        print("f0: (%f , %f) \n" % (f0.real, f0.imag))
    if verbose:
        print("fH: (%f , %f) \n" % (fH.real, fH.imag))

    #
    # convert structure factor in chi (or psi) = - classical_e_radius wavelength^2 fH /(pi volume)
    #
    codata = scipy.constants.codata.physical_constants
    codata_c, tmp1, tmp2 = codata["speed of light in vacuum"]
    codata_h, tmp1, tmp2 = codata["Planck constant"]
    codata_ec, tmp1, tmp2 = codata["elementary charge"]
    codata_r, tmp1, tmp2 = codata["classical electron radius"]
    ev2meter = codata_h * codata_c / codata_ec
    wavelength = ev2meter / (ener * 1e3)
    if verbose:
        print("Photon energy: %f keV \n" % ener)
    if verbose:
        print("Photon wavelength: %f A \n" % (1e10 * wavelength))

    volume = cryst['volume'] * 1e-10 * 1e-10 * 1e-10  # volume of silicon unit cell in m^3
    cte = - codata_r * wavelength * wavelength / (np.pi * volume)
    chi0 = cte * f0
    chiH = cte * fH
    if verbose:
        print("chi0: (%e , %e) \n" % (chi0.real, chi0.imag))
    if verbose:
        print("chiH: (%e , %e) \n" % (chiH.real, chiH.imag))

    return braggAngle, np.conjugate(chi0), np.conjugate(chiH)


if __name__ == "__main__":
    # theta, chi0, chiH = get_crystal_data("Si", hkl=[1, 1, 1], photon_energy_in_keV=12.398, verbose=True)
    theta, chi0, chiH = get_crystal_data("Si", hkl=[1, 1, 1], photon_energy_in_keV=17.0, verbose=True)
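# A standalone sanity check of the energy-to-wavelength conversion used above
# (illustrative sketch, not part of the original file): lambda = h*c/(e*E),
# i.e. lambda[A] ~= 12398.4 / E[eV], so a 12.398 keV photon is about 1 A.
#
#     import scipy.constants as sc
#     E_keV = 12.398
#     wavelength_A = 1e10 * sc.h * sc.c / (sc.e * E_keV * 1e3)
#     print("%.4f A" % wavelength_A)   # -> ~1.0000 A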
UTF-8
Python
false
false
3,017
py
16
crystal_data.py
2
0.623467
0.591316
0
92
31.793478
102
geohot/tinygrad
1,984,274,934,195
a4886f23fd169927c420ed7e9a4982f4a0e71f86
180aa89910e8c1fc65652b26fc6be9243e5ce96b
/models/resnet.py
972fb6aa4127f8077c4be630c34c0e57fb0bbe5d
[ "MIT" ]
permissive
https://github.com/geohot/tinygrad
04521e22deb0e4cb41c3662a00ed177a5701c81c
89b529c07f8ed1eae672faa6f2940c51a153ce0b
refs/heads/master
2023-08-26T17:27:47.942097
2023-08-26T13:34:15
2023-08-26T13:34:15
305,144,746
17,772
2,343
null
null
null
null
null
null
null
null
null
null
null
null
null
import tinygrad.nn as nn
from extra.utils import get_child

class BasicBlock:
  expansion = 1

  def __init__(self, in_planes, planes, stride=1, groups=1, base_width=64):
    assert groups == 1 and base_width == 64, "BasicBlock only supports groups=1 and base_width=64"
    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(planes)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, stride=1, bias=False)
    self.bn2 = nn.BatchNorm2d(planes)
    self.downsample = []
    if stride != 1 or in_planes != self.expansion*planes:
      self.downsample = [
        nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm2d(self.expansion*planes)
      ]

  def __call__(self, x):
    out = self.bn1(self.conv1(x)).relu()
    out = self.bn2(self.conv2(out))
    out = out + x.sequential(self.downsample)
    out = out.relu()
    return out

class Bottleneck:
  # NOTE: stride_in_1x1=False, this is the v1.5 variant
  expansion = 4

  def __init__(self, in_planes, planes, stride=1, stride_in_1x1=False, groups=1, base_width=64):
    width = int(planes * (base_width / 64.0)) * groups
    # NOTE: the original implementation places stride at the first convolution (self.conv1), control with stride_in_1x1
    self.conv1 = nn.Conv2d(in_planes, width, kernel_size=1, stride=stride if stride_in_1x1 else 1, bias=False)
    self.bn1 = nn.BatchNorm2d(width)
    self.conv2 = nn.Conv2d(width, width, kernel_size=3, padding=1, stride=1 if stride_in_1x1 else stride, groups=groups, bias=False)
    self.bn2 = nn.BatchNorm2d(width)
    self.conv3 = nn.Conv2d(width, self.expansion*planes, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm2d(self.expansion*planes)
    self.downsample = []
    if stride != 1 or in_planes != self.expansion*planes:
      self.downsample = [
        nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm2d(self.expansion*planes)
      ]

  def __call__(self, x):
    out = self.bn1(self.conv1(x)).relu()
    out = self.bn2(self.conv2(out)).relu()
    out = self.bn3(self.conv3(out))
    out = out + x.sequential(self.downsample)
    out = out.relu()
    return out

class ResNet:
  def __init__(self, num, num_classes=None, groups=1, width_per_group=64, stride_in_1x1=False):
    self.num = num
    self.block = {
      18: BasicBlock,
      34: BasicBlock,
      50: Bottleneck,
      101: Bottleneck,
      152: Bottleneck
    }[num]

    self.num_blocks = {
      18: [2,2,2,2],
      34: [3,4,6,3],
      50: [3,4,6,3],
      101: [3,4,23,3],
      152: [3,8,36,3]
    }[num]

    self.in_planes = 64
    self.groups = groups
    self.base_width = width_per_group

    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, bias=False, padding=3)
    self.bn1 = nn.BatchNorm2d(64)
    self.layer1 = self._make_layer(self.block, 64, self.num_blocks[0], stride=1, stride_in_1x1=stride_in_1x1)
    self.layer2 = self._make_layer(self.block, 128, self.num_blocks[1], stride=2, stride_in_1x1=stride_in_1x1)
    self.layer3 = self._make_layer(self.block, 256, self.num_blocks[2], stride=2, stride_in_1x1=stride_in_1x1)
    self.layer4 = self._make_layer(self.block, 512, self.num_blocks[3], stride=2, stride_in_1x1=stride_in_1x1)
    self.fc = nn.Linear(512 * self.block.expansion, num_classes) if num_classes is not None else None

  def _make_layer(self, block, planes, num_blocks, stride, stride_in_1x1):
    strides = [stride] + [1] * (num_blocks-1)
    layers = []
    for stride in strides:
      if block == Bottleneck:
        layers.append(block(self.in_planes, planes, stride, stride_in_1x1, self.groups, self.base_width))
      else:
        layers.append(block(self.in_planes, planes, stride, self.groups, self.base_width))
      self.in_planes = planes * block.expansion
    return layers

  def forward(self, x):
    is_feature_only = self.fc is None
    if is_feature_only: features = []
    out = self.bn1(self.conv1(x)).relu()
    out = out.pad2d([1,1,1,1]).max_pool2d((3,3), 2)
    out = out.sequential(self.layer1)
    if is_feature_only: features.append(out)
    out = out.sequential(self.layer2)
    if is_feature_only: features.append(out)
    out = out.sequential(self.layer3)
    if is_feature_only: features.append(out)
    out = out.sequential(self.layer4)
    if is_feature_only: features.append(out)
    if not is_feature_only:
      out = out.mean([2,3])
      out = self.fc(out).log_softmax()
      return out
    return features

  def __call__(self, x):
    return self.forward(x)

  def load_from_pretrained(self):
    # TODO replace with fake torch load
    model_urls = {
      (18, 1, 64): 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
      (34, 1, 64): 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
      (50, 1, 64): 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
      (50, 32, 4): 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
      (101, 1, 64): 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
      (152, 1, 64): 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }

    self.url = model_urls[(self.num, self.groups, self.base_width)]

    from torch.hub import load_state_dict_from_url
    state_dict = load_state_dict_from_url(self.url, progress=True)
    for k, v in state_dict.items():
      obj = get_child(self, k)
      dat = v.detach().numpy()

      if 'fc.' in k and obj.shape != dat.shape:
        print("skipping fully connected layer")
        continue  # Skip FC if transfer learning

      # TODO: remove or when #777 is merged
      assert obj.shape == dat.shape or (obj.shape == (1,) and dat.shape == ()), (k, obj.shape, dat.shape)
      obj.assign(dat)

ResNet18 = lambda num_classes=1000: ResNet(18, num_classes=num_classes)
ResNet34 = lambda num_classes=1000: ResNet(34, num_classes=num_classes)
ResNet50 = lambda num_classes=1000: ResNet(50, num_classes=num_classes)
ResNet101 = lambda num_classes=1000: ResNet(101, num_classes=num_classes)
ResNet152 = lambda num_classes=1000: ResNet(152, num_classes=num_classes)
ResNeXt50_32X4D = lambda num_classes=1000: ResNet(50, num_classes=num_classes, groups=32, width_per_group=4)
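# A minimal usage sketch for the model above (illustrative, not part of the
# original file). It assumes tinygrad's Tensor class and an NCHW float input;
# the exact import path can vary between tinygrad versions.
#
#     from tinygrad.tensor import Tensor
#     model = ResNet18(num_classes=1000)
#     model.load_from_pretrained()                   # pulls torchvision weights
#     out = model(Tensor.randn(1, 3, 224, 224))      # (1, 1000) log-probabilities
#
# Constructing ResNet(50) with num_classes=None instead returns the four
# per-stage feature maps, which is what detection backbones consume.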
UTF-8
Python
false
false
6,300
py
181
resnet.py
166
0.658889
0.603333
0
153
40.183007
132
Dearyyyyy/TCG
19,078,244,762,876
4712940fadd388e623fe820845d58b82aa9c22cc
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
/data/3921/AC_py/508183.py
dd42196616efcbb3891deb084f98ea74bdc2c237
[]
no_license
https://github.com/Dearyyyyy/TCG
0d21d89275906157372d775f33309ce337e6bc95
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
refs/heads/master
2020-12-27T23:19:44.845918
2020-02-04T01:59:23
2020-02-04T01:59:23
238,101,032
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding=utf-8
import sys


def ss(s):
    if len(s) < 2:
        return True
    if s[0] == s[-1]:
        return ss(s[1:-1])
    else:
        return False


n = int(input())
for i in range(n):
    s = [str(i) for i in input()]
    if ss(s) == True:
        print('YES')
    else:
        print('NO')
UTF-8
Python
false
false
288
py
898
508183.py
652
0.482639
0.461806
0
16
17.0625
31
andreishabanski/Labs
14,482,629,759,264
c2eda155c9391ececc646c432fe2110babfa13f1
af9d64a207a22853fe9bb02b2b326441454c68a8
/8 semester/SAIO/lab7/lab7.py
9f9ef90eb752d78f3cdf3ae7a85c64b98a88c739
[ "MIT" ]
permissive
https://github.com/andreishabanski/Labs
89f5a9a865580d03c94c56cd94bcf26ae50ef5a8
3ac775945bff5801642183c48de1e839685d5aa4
refs/heads/master
2016-03-25T19:54:54.055492
2015-05-24T06:49:20
2015-05-24T06:49:20
12,844,548
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import print_function

from algoritms import DestinationCalculator
from common.utils import print_matrix


def main():
    # table = [
    #     [2, 10, 9, 7],
    #     [15, 4, 14, 8],
    #     [13, 14, 16, 11],
    #     [4, 15, 13, 19],
    # ]
    # table = [
    #     [2, -1, 9, 4],
    #     [3, 2, 5, 1],
    #     [13, 0, -3, 4],
    #     [5, 6, 1, 2],
    # ]
    # table = [[6, 4, 13, 4, 19, 15, 11, 8],
    #          [17, 15, 18, 14, 0, 7, 18, 7],
    #          [3, 5, 11, 9, 7, 7, 18, 16],
    #          [17, 10, 16, 19, 9, 6, 1, 5],
    #          [14, 2, 10, 14, 11, 6, 4, 10],
    #          [17, 11, 17, 12, 1, 10, 6, 19],
    #          [13, 1, 4, 2, 2, 7, 2, 14],
    #          [12, 15, 19, 11, 13, 1, 7, 8]]
    # table = [[2, 4, 0, 3, 8, -1, 6, 5],
    #          [8, 6, 3, 4, 2, 0, 0, 4],
    #          [8, -4, 3, 2, 7, 3, 1, 0],
    #          [2, 4, 9, 5, 3, 0, 3, 8],
    #          [5, 2, 7, 3, -1, 0, 3, 2],
    #          [3, 2, 5, 1, 5, 3, 0, 1],
    #          [2, 1, 0, -3, 1, 2, 7, 0],
    #          [1, 6, 4, 0, 0, 9, 1, 7]]
    table = [
        [3, -3, 4, 5, 8, 1],
        [-1, 0, 2, 4, 5, 6],
        [7, 3, 8, -1, 10, 15],
        [-1, 5, 9, 12, 4, 3],
        [2, 2, 12, 4, 3, 2],
        [1, 4, 3, 4, 20, 3]
    ]

    calculator = DestinationCalculator(table)
    x = calculator.calculator()

    print('\nSolution:')
    print('Cost = {}'.format(sum(table[i][j] * x[i][j]
                                 for i in xrange(0, len(table))
                                 for j in xrange(0, len(table)))))
    print('X = ')
    print_matrix(x)


if __name__ == '__main__':
    main()
UTF-8
Python
false
false
1,512
py
437
lab7.py
159
0.365079
0.201058
0
58
25.068966
117
sxu11/Inference_Stem_Cell_Dynamics
2,413,771,652,256
a2de87d4ede835f581dcc546a2b6d3f5d47f4d2e
22241442f216859e99560cbf33c75f8385c75dcd
/raw_data/diversities.py
ce9f6bcdb1043ef69943dc29ec99da2225eee18d
[]
no_license
https://github.com/sxu11/Inference_Stem_Cell_Dynamics
611274d39ebbd36ca1b67f4ec6b10dc7f005d67c
ea9ae43a86e7e48883e93284ca1cfa14e5965ac4
refs/heads/master
2020-04-25T00:56:48.869758
2019-05-24T05:47:52
2019-05-24T05:47:52
172,393,925
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pandas as pd

pd.set_option('display.width', 3000)
pd.set_option('display.max_columns', 3000)

df = pd.read_csv('abunds/zh33.csv')

cell_types = ['T', 'B', 'Mon', 'Gr', 'NK']
times = ['1m', '2m', '3m', '4.5m', '6.5m', '9.5m']
ts = [float(x[:-1]) for x in times]

cols = {}
for time in times:
    cols[time] = []
    for col in df.columns.values:
        if (time in col) and ('node' not in col):
            cols[time].append(col)
cols['6.5m'] = ['6.5m T', '6.5m B', '6.5m NK overall', '6.5m Mon', '6.5m Gr']


def alph_diversity(arr):
    return sum(arr > 0)


def gamm_diversity(arr):
    return 5


def beta_diversity(arr):
    return gamm_diversity(arr) - alph_diversity(arr)


import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt

'''https://ecopy.readthedocs.io/en/latest/diversity.html'''
import ecopy

# for time in times:
#     print cols[time]

D_alpha, D_beta, D_gamma = ecopy.div_partition(df[cols['6.5m']].transpose(), method='spRich')
# print D_alpha, D_beta, D_gamma
#
# df_tmp = df[cols['6.5m']]
# df_tmp = df_tmp[(df_tmp.T != 0).any()]
# print df_tmp.shape
# quit()
#
# tmps = []
# for i in range(5):
#     tmps.append(sum(df_tmp[cols['6.5m'][i]] > 0))
# print sum(tmps)/5.
# quit()

alphs, betas, gammas = [], [], []
for time in times:
    D_alpha, D_beta, D_gamma = ecopy.div_partition(df[cols[time]].transpose(), method='spRich')
    alphs.append(D_alpha)
    betas.append(D_beta)
    gammas.append(D_gamma)

matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)

fig, ax1 = plt.subplots()
ax1.plot(ts, alphs, label=r'$\alpha$')
ax1.plot(ts, gammas, label=r'$\gamma$')
ax1.set_xlabel('month', fontsize=16)
ax1.set_ylabel(r'$\alpha$ and $\gamma$ diversity', fontsize=16)
plt.legend(loc=(.8, .8), fontsize=14)
#
ax2 = ax1.twinx()
ax2.plot(ts, betas, 'k--', label=r'$\beta$')
ax2.set_ylabel(r'$\beta$ diversity', fontsize=16)
plt.legend(loc=(.8, .72), fontsize=14)

plt.tight_layout()
plt.show()

# T_cols, B_cols, Mon_cols, Gr_cols, NK_cols = [], [], [], [], []
# for col in df.columns.values:
#     if 'T' in col:
#         T_cols.append(col)
#     elif 'B' in col:
#         B_cols.append(col)
#     elif 'Mon' in col:
#         Mon_cols.append(col)
#     elif 'NK' in col:
#         NK_cols.append(col)
#     elif 'Gr' in col:
#         Gr_cols.append(col)
#
# print T_cols, B_cols, Mon_cols, Gr_cols, NK_cols
# df_T = df[T_cols[:-1]]
# df_B = df[B_cols[:-1]]
#
# print df[Mon_cols]
UTF-8
Python
false
false
2,542
py
11
diversities.py
9
0.580645
0.551534
0
97
25.195876
78
fperi/cdk_step_function
15,934,328,685,403
80252fe391ed28109770d64c75b898141ae2f8b1
04c9905b1fc9fe2d85aea2624a1f32b461af64f6
/source/setup.py
de6b601a15e0af485de481676a650ac9d2dde427
[]
no_license
https://github.com/fperi/cdk_step_function
1afbd00814e82ab9ce4d68f6f60d2d2d34cf4a86
b3666b099f19b7d71783942a97a3e0e9e8c4056c
refs/heads/master
2023-05-01T08:53:26.679259
2021-05-08T17:52:22
2021-05-08T17:52:22
365,563,036
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from setuptools import find_packages, setup

with open("requirements.txt") as stream:
    REQUIREMENTS = stream.read().splitlines()

setup(
    name="cdk_test",
    version="1.0",
    description="Test app for cdk deployment",
    packages=find_packages(),
    python_requires=">=3.6",
    install_requires=REQUIREMENTS,
    zip_safe=False,
)
UTF-8
Python
false
false
343
py
20
setup.py
10
0.682216
0.670554
0
14
23.5
46
Kreedols/Koostetehnoloogiaprojekt
8,615,704,432,982
04ff9c07b4ea112a5c7574f085f7bae9b2199706
71b935c6363593cbca813ac3068b1f6591cf35b9
/home folder/b.py
483457299ab82cef7b3bd831e3a2a155a3230fec
[]
no_license
https://github.com/Kreedols/Koostetehnoloogiaprojekt
a2f41970732c0d0913a5aa1b0b21825b9d7d3adb
0b739c381efbf649470c314d39c60fd32ae1cbf9
refs/heads/master
2020-05-21T05:26:37.922579
2019-01-09T22:41:49
2019-01-09T22:41:49
84,576,993
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import threading
from array import array
from Queue import Queue, Full

import pyaudio

CHUNK_SIZE = 1024
MIN_VOLUME = 500
BUF_MAX_SIZE = CHUNK_SIZE * 10
UTF-8
Python
false
false
155
py
44
b.py
25
0.76129
0.703226
0
10
14.5
30
wmgroot/servicer
4,492,535,794,467
7e79f25329036dea81eeb3ca1f566b8c01ccb9f2
a749274d74531a0c5806ac672e348c157d84d37a
/test/test_config_loader.py
2982614ac3d0dc1a187898a89c25009a65bcc419
[ "BSD-3-Clause" ]
permissive
https://github.com/wmgroot/servicer
a986e92772dee9831968500b1c36c7efd1eb89e0
13313bcaed2b487848f2b483aea13e538a4484cc
refs/heads/master
2023-05-27T01:35:55.709438
2019-10-22T22:06:45
2019-10-22T22:06:45
138,642,168
3
1
BSD-3-Clause
false
2019-09-02T18:25:49
2018-06-25T19:45:07
2019-08-20T18:36:53
2019-09-02T18:25:49
239
1
1
5
Python
false
false
from unittest import TestCase, mock

from servicer.config_loader import ConfigLoader


class ConfigLoaderTest(TestCase):
    def setUp(self):
        self.config_loader = ConfigLoader(args={})


class ConfigLoaderClassTest(ConfigLoaderTest):
    def test_initialized(self):
        pass


class MergeConfigTest(ConfigLoaderTest):
    def setUp(self):
        super().setUp()

        self.config_loader.merge_config = mock.Mock(side_effect=self.config_loader.merge_config)

    def test_merges_empty_dicts(self):
        from_dict = {}
        to_dict = {}
        self.config_loader.merge_config(to_dict, from_dict)

        self.config_loader.merge_config.assert_has_calls([
            mock.call(to_dict, from_dict),
        ])
        self.assertEqual(to_dict, {})

    def test_merges_empty_dict_into_non_empty(self):
        from_dict = {}
        to_dict = {'foo': 'bar'}
        self.config_loader.merge_config(to_dict, from_dict)

        self.config_loader.merge_config.assert_has_calls([
            mock.call(to_dict, from_dict),
        ])
        self.assertEqual(to_dict, {'foo': 'bar'})

    def test_overwrites_values(self):
        from_dict = {
            'tacos': 'tacodeli',
        }
        to_dict = {
            'foo': 'bar',
            'tacos': 'torchys',
        }
        self.config_loader.merge_config(to_dict, from_dict)

        self.config_loader.merge_config.assert_has_calls([
            mock.call(to_dict, from_dict),
        ])
        self.assertEqual(to_dict, {
            'foo': 'bar',
            'tacos': 'tacodeli',
        })

    # note: list values are not combined, they are completely overwritten
    def test_overwrites_nested_values(self):
        from_dict = {
            'numbers': [4, 5, 6],
            'people': {
                'astronaut': 'buzz aldrin',
            },
            'capitols': [
                {'brazil': 'not rio de janeiro'},
            ],
        }
        to_dict = {
            'numbers': [1, 2, 3],
            'people': {
                'astronaut': 'neil armstrong',
                'philosopher': 'socrates',
            },
            'capitols': [
                {'france': 'paris'},
                {'brazil': 'rio de janeiro'},
            ],
        }
        self.config_loader.merge_config(to_dict, from_dict)

        self.config_loader.merge_config.assert_has_calls([
            mock.call(to_dict, from_dict),
        ])
        self.assertEqual(to_dict, {
            'numbers': [4, 5, 6],
            'people': {
                'astronaut': 'buzz aldrin',
                'philosopher': 'socrates',
            },
            'capitols': [
                {'brazil': 'not rio de janeiro'},
            ],
        })
UTF-8
Python
false
false
2,737
py
52
test_config_loader.py
45
0.505298
0.502009
0
97
27.216495
96
Harry887/ml
11,708,080,868,378
86bb0d3be3d3d94f51ce030178da24ff53807670
d92e4a0be6e31129fddfcb77453ea845a9ca4100
/Demo1/optimizer.py
2e6c73cbd6cfa339c0921e09eb8bea571cdd06b4
[]
no_license
https://github.com/Harry887/ml
13f22650136dbef1417094431c5caf2b1c629b13
539984fd9bfee9bffda5032ae0d312c0c75a0fbd
refs/heads/master
2020-04-23T07:32:39.299294
2019-02-16T13:54:02
2019-02-16T13:54:02
171,009,211
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import torch
import torch.utils.data as Data
import torch.nn.functional as F
import matplotlib.pyplot as plt

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()


# default network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)    # hidden layer
        self.predict = torch.nn.Linear(20, 1)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # activation function for hidden layer
        x = self.predict(x)         # linear output
        return x


if __name__ == '__main__':
    # different nets
    net_SGD = Net()
    net_Momentum = Net()
    net_RMSprop = Net()
    net_Adam = Net()
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    # different optimizers
    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

    loss_func = torch.nn.MSELoss()
    losses_his = [[], [], [], []]   # record loss
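    # The record ends before the training loop; a minimal sketch of how this
    # optimizer comparison would typically continue (assumed, not part of the
    # original file): batch the fake dataset, step each optimizer on its own
    # net, and record the per-batch losses for plotting.
    #
    #     loader = Data.DataLoader(dataset=Data.TensorDataset(x, y),
    #                              batch_size=BATCH_SIZE, shuffle=True)
    #     for epoch in range(EPOCH):
    #         for b_x, b_y in loader:
    #             for net, opt, l_his in zip(nets, optimizers, losses_his):
    #                 loss = loss_func(net(b_x), b_y)
    #                 opt.zero_grad()
    #                 loss.backward()
    #                 opt.step()
    #                 l_his.append(loss.data.numpy())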
UTF-8
Python
false
false
1,449
py
5
optimizer.py
5
0.612836
0.590752
0
48
29.208333
82
RomanKamen/RomanKamen
8,821,862,866,787
e5c5c8703b301eab55ee41c3971d6f9e642781cf
1b49b0cb4d9c1e6805f1153eab9658dc733d4fd4
/GameEngine/player.py
2ae3310ce14bd8fc7390b3a000a03c282ce50107
[]
no_license
https://github.com/RomanKamen/RomanKamen
b308cc820df990de05cc84f894906c00b844de88
1c8b8a5d6dbc8dc5e03f5ff4453cfe772c60a79a
refs/heads/master
2020-07-09T08:14:50.380811
2019-08-24T01:02:39
2019-08-24T01:02:39
203,923,330
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
from GameEngine.func import deal_damage, game_initialized_only

# TODO: end_act
# TODO: timer decorator


class Player:
    def __init__(self, deck):
        self.game = None
        self.deck = deck
        self.client_id = 1
        self.side = ''
        self.hp = 30
        self.armor = 0
        self.mana = 0

    def init_game(self, game):
        self.game = game

    def set_side(self, side):
        self.side = side
        for c in self.deck.cards:
            c.side = side

    @game_initialized_only
    def handle_attack(self, attacker):
        deal_damage(self, attacker)
        self.game.update()

    # @game_initialized_only
    # def make_move(self):
    #     pass

    @game_initialized_only
    def attack_with_card(self, card, target):
        # mb create a decorator
        if self.game.current_player is self:
            card.attack(target)
        else:
            # mb assert
            print('Not your turn')

    @game_initialized_only
    def end_act(self):
        self.game.end_act()
        pass
UTF-8
Python
false
false
1,039
py
16
player.py
16
0.557267
0.552454
0
46
21.586957
62
jhyrkas/canne_synth
18,605,798,343,209
4d1b0d90d93896facfd971aa4fc4ea630c1e8278
480128c00099fdba1280172e18039e497dcffb92
/probably_useful_for_author_only/comp3.py
0e26e7b8b8dbc282fbfd11f27ff5397e23b5d0d8
[]
no_license
https://github.com/jhyrkas/canne_synth
acf5cff59f5d7d48967ee3c508204ee4739f1e43
e780c7b332b00a948a09e6ff1c985d99796013fb
refs/heads/master
2021-06-11T03:09:42.653769
2021-06-03T21:28:36
2021-06-03T21:28:36
194,894,284
0
0
null
true
2019-07-02T15:56:39
2019-07-02T15:56:39
2019-06-12T07:33:23
2018-09-27T01:12:58
14
0
0
0
null
false
false
import numpy as np
import synth_architecture as sa
import sounddevice as sd
import soundfile as sf
import librosa
import os
import scipy.signal

np.random.seed(20855144)
#np.random.seed(212)

path = 'comp3_audio/'
if not os.path.exists(path):
    os.mkdir(path)

# ARCHITECTURE
a = sa.Architecture('root', np.zeros(8))
a.add_network('channel1_carr', 'root')
a.add_network('channel2_pred', 'root', predictive_feedback_mode=True)
a.add_network('channel3_carr', 'root')
a.add_network('channel3_pred', 'channel3_carr', predictive_feedback_mode=True)
a.add_network('channel4_pred', 'root', predictive_feedback_mode=True)
a.add_network('channel4_car', 'channel4_pred')
a.add_network('channel5_pass', 'root', passthrough_network=True)

carr_names = ['channel1_carr', 'channel3_carr', 'channel4_car']

# NOTES
def gen_params(time, frames, low, high, osc, freq, phase):
    ohm = np.linspace(0, time, frames) * freq * 2.0 * np.pi + phase
    if osc == 'sin':
        sig = np.sin(ohm)
    elif osc == 'square':
        sig = scipy.signal.square(ohm)
    elif osc == 'saw':
        sig = scipy.signal.sawtooth(ohm)
    else:
        sig = scipy.signal.sawtooth(ohm, width=0.5)
    return ((sig + 1.0) / 2.0) * (high - low) + low

osces = ['sin', 'square', 'saw', 'triangle']
audio_length = 30
nframes = a.get_num_frames(audio_length)
params = np.zeros((nframes, 8))
pitches = np.linspace(-24, 12, 12)

for i in range(1, 11):
    # root params
    for j in range(8):
        lo, hi = np.random.random(2) * (3.9 + i / 10)
        if lo > hi:
            lo, hi = hi, lo
        osc = osces[np.random.randint(4)]
        freq = np.random.random()
        phase = np.random.random() * 2.0 * np.pi
        params[:, j] = gen_params(audio_length, nframes, lo, hi, osc, freq, phase)
    a.update_params('root', params)

    # carrier params
    for carrier in carr_names:
        for j in range(8):
            lo, hi = np.random.random(2) * (8.0 + i * 2.0)
            if lo > hi:
                lo, hi = hi, lo
            osc = osces[np.random.randint(4)]
            freq = np.random.random()
            phase = np.random.random() * 2.0 * np.pi
            params[:, j] = gen_params(audio_length, nframes, lo, hi, osc, freq, phase)
        a.update_params(carrier, params)

    channels = a.generate_audio(audio_length, pitches[i])
    assert(len(channels) == 5)
    for channel in range(5):
        sf.write(path + 'channel' + str(channel + 1) + '_' + str(i) + '.wav', channels[channel], 44100)

# ONE LAST DRONE
#a.update_params('root', np.random.random(8) * 4.0)
#sf.write(path + 'drone.wav', a.generate_audio(300, -18)[1], 44100)
UTF-8
Python
false
false
2,626
py
17
comp3.py
16
0.606626
0.573877
0
79
32.240506
101
idontknowinder/EVE-Prosper
11,905,649,364,889
03b4cb755c2ab3c5d93bd6a9b12b8efed9bca37a
15ce0e08d87a602cdbc49e8ebebcbe4a0151f3fd
/Scraps/central_dumpcrunch.py
c1cf4a00cb5ec1b9d504bc413076850a44db2abb
[]
no_license
https://github.com/idontknowinder/EVE-Prosper
74f7fceedfe08ef0392c78b69537550ac6272d11
6d103ace8e2f8cb332fcee83ce08035dc7dc87b4
refs/heads/master
2021-01-19T17:17:27.909668
2014-05-11T20:24:43
2014-05-11T20:24:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/Python27/python.exe

##Try 2.0 for processing dump files for loading SQL file
##Designed for UNIX environment
##For win/DOS, install cygwin: http://www.cygwin.com/
##  CYGWIN + MYSQLDB:
##  --Install all the DB/Python modules for cygwin
##  --run: easy_install mysql-python
##  --Verify with python terminal: import MySQLdb

##Processes raw dump files from eve central: eve-central.com/dumps

import csv, sys, math, os, gzip, getopt, subprocess
import MySQLdb
#NOTES: http://thingsilearned.com/2009/05/03/simple-mysqldb-example/

## Globals ##
DATABASE_HOST = "127.0.0.1"
DATABASE_USER = "root"
DATABASE_NAME = "eve_marketdata"
DATABASE_PASSWD = "bar"
DATABASE_PORT = "3306"

dumpfile = "/central_dumps"
datafile = "2012-01-01.dump"    #default file for debug
outfile = "result.csv"
SQL = 0     #default print = CSV

pwd_raw = os.popen("pwd")
pwd = (pwd_raw.read()).rstrip()

cleanlist = {}
globalist = {}
systemFilter = "30000142"


def main():
    print "running main"
    #READ LIST OF FILES FROM DUMP FOLDER#
    cmdline = "ls %s%s/*.gz" % (pwd, dumpfile)
    try:
        filelist_raw = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE)
    except OSError:
        subprocess.call("mkdir %s" % dumpfile, shell=True, stdout=subprocess.PIPE)
        filelist_raw = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE)
    output, err = filelist_raw.communicate()
    filelist = output.split("\n")   #returns list of filenames
    filelist.pop()                  #Removes extra \n empty value from list

    for filezip in filelist:
        rawdump = gzip.open(filezip)
        raw_parse = loadCSV(rawdump)    #parse file

        for order, data in raw_parse.iteritems():
            buy_or_sell = "sell"
            if int(data["bid"]) == 1:
                buy_or_sell = "buy"

            if data["typeid"] in cleanlist:
                if data["systemid"] in cleanlist[data["typeid"]]:
                    if buy_or_sell in cleanlist[data["typeid"]][data["systemid"]]:
                        #existing entry case
                        #general data values
                        if float(data["price"]) > cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["max"]:
                            cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["max"] = float(data["price"])
                        if float(data["price"]) < cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["min"]:
                            cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["min"] = float(data["price"])

                        temp = cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] + int(data["volenter"])
                        delta = float(data["price"]) - cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["avg"]
                        R = delta * (int(data["volenter"]) / temp)
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["avg"] += R
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["M2"] += (cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] * delta * R)
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] += long(data["volenter"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["var"] = cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["M2"] / cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"]
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["stdev"] = math.sqrt(cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["var"])
                    else:
                        #typeid AND system exist, but not buy/sell key
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell] = {}
                        #initialize general data values#
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["max"] = float(data["price"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["min"] = float(data["price"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["avg"] = float(data["price"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] = long(data["volenter"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["region"] = int(data["regionid"])
                        #initialize running-average values#
                        temp = long(data["volenter"])
                        delta = float(data["price"])
                        R = delta * long(data["volenter"]) / temp
                        M2 = long(data["volenter"]) * delta * R
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["M2"] = M2
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["var"] = M2 / long(data["volenter"])
                        cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["stdev"] = 0
                else:
                    #typeid exists, but not for this system
                    cleanlist[data["typeid"]][data["systemid"]] = {}
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell] = {}
                    #initialize general data values#
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["max"] = float(data["price"])
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["min"] = float(data["price"])
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["avg"] = float(data["price"])
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] = long(data["volenter"])
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["region"] = int(data["regionid"])
                    #initialize running-average values#
                    temp = long(data["volenter"])
                    delta = float(data["price"])
                    R = delta * long(data["volenter"]) / temp
                    M2 = long(data["volenter"]) * delta * R
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["M2"] = M2
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["var"] = M2 / long(data["volenter"])
                    cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["stdev"] = 0
            else:
                #initialize totally new key
                cleanlist[data["typeid"]] = {}
                cleanlist[data["typeid"]][data["systemid"]] = {}
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell] = {}
                #initialize general data values#
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["max"] = float(data["price"])
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["min"] = float(data["price"])
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["avg"] = float(data["price"])
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["vol"] = long(data["volenter"])
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["region"] = int(data["regionid"])
                #initialize running-average values#
                temp = long(data["volenter"])
                delta = float(data["price"])
                R = delta * long(data["volenter"]) / temp
                M2 = long(data["volenter"]) * delta * R
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["M2"] = M2
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["var"] = M2 / long(data["volenter"])
                cleanlist[data["typeid"]][data["systemid"]][buy_or_sell]["stdev"] = 0

        #print output
        #print cleanlist["39"][systemFilter]    #prints whole price object for [type][location]

        outlist = list_izer(cleanlist, filezip)
        for line in range(1, 15):
            if outlist[line][-1] == "":
                outlist[line][-1] = "NULL"
            #cursor.execute("INSERT INTO 'rawdata' VALUES (%d, %s, %d, %d, %s, %d, %d, %d, %d, NULL)" % line)
            #   (itemid, order_date, regionID, systemID, order_type, price_max_, price_min, price_avg, price_stdev)
            #parameterized query so MySQLdb quotes the string fields; the row has ten values
            cursor.execute(
                "INSERT INTO rawdata VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                tuple(outlist[line]))
            #cursor.execute("INSERT INTO rawdata (itemid) VALUE (%s)" % outlist[line][0])
            #print outlist[line]
        rawdump.close()


def list_izer(resultList, filepath):
    #takes cleanlist{} and returns a list-of-lists array
    #filename needed to parse out date key
    #Used for CSV output (ill advised)

    #build header
    list_out = []
    header = ["itemid", "date", "region", "system", "type", "max", "min", "avg", "stdev", "other"]
    list_out.append(header)

    #parse date from filename
    (dump, filename) = filepath.split("%s/" % dumpfile)
    (date, dump) = filename.split(".dump")

    for itemid, first_dict in resultList.iteritems():
        for system, second_dict in first_dict.iteritems():
            for type, root_dict in second_dict.iteritems():
                entry = [itemid, date, root_dict["region"], system, type, root_dict["max"], root_dict["min"], root_dict["avg"], root_dict["stdev"], ""]
                list_out.append(entry)
    return list_out


def loadCSV(filename):
    #accepts string filename and returns dict-dict object of requested file
    parsed_dump = {}
    CSV = csv.reader(filename)
    fields = CSV.next()
    for row in CSV:
        items = zip(fields, row)    #Strips header from CSV
        item = {}
        for (name, value) in items:
            item[name] = value.strip()  #assigns values to dict using header as keys

        if item["orderid"] in parsed_dump:
            #repeated order case
            #update samples to relevant edge
            if item["price"] < parsed_dump[item["orderid"]]["price"] and parsed_dump[item["orderid"]]["bid"] == "1":
                #SELL ORDERS: lowest price matters
                parsed_dump[item["orderid"]]["price"] = item["price"]
            elif item["price"] > parsed_dump[item["orderid"]]["price"] and parsed_dump[item["orderid"]]["bid"] == "0":
                #BUY ORDERS: highest price matters
                parsed_dump[item["orderid"]]["price"] = item["price"]
        else:
            parsed_dump[item["orderid"]] = item

    #builds return dict-dict object
    return parsed_dump  #parsed_dump[orderid] = {orderid:###, regionid:###, ...}


def usage():
    #minimal help text; the original script calls usage() but never defines it
    print "usage: central_dumpcrunch.py [-i dumpdir] [-d dumpfile] [-o outfile] [-S] [-h]"


def parseargs(argv):
    global dumpfile, datafile, outfile, SQL
    print "running parseargs"
    try:
        opts, args = getopt.getopt(argv, "i:d:o:S:h", ["input=", "debug=", "output=", "SQL", "help"])
    except getopt.GetoptError:
        usage()
        sys.exit(1)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print "help"
            usage()
            sys.exit(1)
        elif opt in ('-i', '--input='):
            dumpfile = arg
        elif opt in ('-d', '--debug='):
            datafile = arg
        elif opt in ('-o', '--output='):
            outfile = arg
        elif opt in ('-S', "--SQL"):
            SQL = 1
        else:
            usage()
            sys.exit(1)


def initDB():
    db = MySQLdb.connect(host=DATABASE_HOST, user=DATABASE_USER, passwd=DATABASE_PASSWD, port=int(DATABASE_PORT), db=DATABASE_NAME)
    cursor = db.cursor()
    #cursor.execute("drop database %s; create database %s" % (DATABASE_NAME, DATABASE_NAME))
    #db = MySQLdb.connect(host=DATABASE_HOST, user=DATABASE_USER, passwd=DATABASE_PASSWD, port=int(DATABASE_PORT), db=DATABASE_NAME)
    return cursor


class Entry(object):
    #stores the various values and running tallies for each vector key
    def __init__(self, data):   #takes dict. data from main iterator
        self.min = None
        self.max = None
        self.vol = None
        self.avg = None
        self.stdev = None


if __name__ == "__main__":
    parseargs(argv=sys.argv[1:])
    cursor = initDB()
    main()
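# The temp/delta/R/M2 updates above implement West's weighted incremental
# mean/variance recurrence; a standalone sketch of the same arithmetic
# (illustrative, not part of the original file):
def weighted_running_stats(prices_and_volumes):
    """Yield (mean, variance) after each (price, volume) observation."""
    total, mean, m2 = 0.0, 0.0, 0.0
    for price, vol in prices_and_volumes:
        total += vol                    # running weight (traded volume)
        delta = price - mean
        r = delta * vol / total
        mean += r                       # volume-weighted running average
        m2 += (total - vol) * delta * r # (total - vol) is the previous weight
        yield mean, m2 / total          # weighted population variance

# e.g. list(weighted_running_stats([(1, 1), (3, 1)])) -> [(1.0, 0.0), (2.0, 1.0)]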
UTF-8
Python
false
false
10,383
py
58
central_dumpcrunch.py
29
0.645093
0.638255
0
243
41.728395
203
projx/vm-scripts
19,112,604,487,642
633fdeac1e079414d63ecb2942e8cbac49941703
e957b965a397c4c1ddfefb8fc0548fdd6726b9ba
/vm-power-mgr.py
26f9cae96e778c9336e348b90119c5ab1cdd03c4
[]
no_license
https://github.com/projx/vm-scripts
28eb18026f633ba118e26d55b640c240c9de384f
1860d80dade72b176da05f8737c4b3226a589d07
refs/heads/master
2020-05-01T12:39:04.321812
2019-03-31T20:36:16
2019-03-31T20:36:16
177,471,490
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Author: Kelvin W
# Date: 2019
# Description: A power saving script for my or your Dell PowerEdge ESXi homelab.
#              It can be used to power down, i.e. suspending VMs and shutting down
#              the host, then powering it back up via IPMI and resuming the VMs.

from pyVim import connect, task
from pyVmomi import vim
from tools import cli
from tools import tasks
from pprint import pprint

import atexit
import time
import requests
import urllib3
import ssl
import configparser
import os
import subprocess
import argparse
import sys
import re
import logging
import traceback


class logger:
    loggingEnabled = False
    initialised = False
    logger = None

    def __init__(self):
        if logger.initialised == False:
            logger.setup()

    @staticmethod
    def setup():
        logger.initialised = True
        logger.logger = logging.getLogger()
        formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.logger.addHandler(console_handler)
        logger.logger.setLevel(logging.INFO)

    @staticmethod
    def set_enabled(status):
        logger.loggingEnabled = status

    @staticmethod
    def debug(str):
        if logger.loggingEnabled:
            logger.logger.debug(str)

    @staticmethod
    def info(str):
        if logger.loggingEnabled:
            logger.logger.info(str)


"""
Consolidates "tasks" returned by calls to pyVmomi, then allows wait() for the tasks to be completed...
"""
class ESXiTaskManager:

    def __init__(self):
        self.tasks = []

    def add_task(self, task):
        self.tasks.append(task)

    def add_task_list(self, group):
        self.tasks = self.tasks + group

    def check(self):
        if len(self.tasks):
            for task in self.tasks:
                logger.info(task.info)
        else:
            logger.info("No tasks..")

    def _progress_output(self, task, progress):
        if progress is None:
            return
        try:
            progress = str(progress)
            if "error" in progress:
                return  ## Just return at this point.. the exception handler in wait() will deal with this
            if progress.isdigit():
                progress = progress + "%"
            logger.info("{} on {}, progress is {}".format(task.info.descriptionId, task.info.entityName, progress))
        except (TypeError) as e:
            pass

    def clear(self):
        self.tasks = list()

    def wait(self, show_progress=False):
        if len(self.tasks) > 0:
            try:
                if show_progress:
                    progress_call = self._progress_output
                else:
                    progress_call = None
                task.WaitForTasks(tasks=self.tasks, onProgressUpdate=progress_call)
            except (Exception) as e:
                logger.info("Houston, we have a problem: " + e.msg)


"""
Manages the connection to ESXi/vCenter hosts, including accessing the server
instance and data contents
"""
class ESXiConnectionManager:

    def __init__(self, svr, usr, pss, prt):
        self.server_instance = None
        try:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            self.server_instance = connect.SmartConnectNoSSL(host=svr, user=usr, pwd=pss, port=prt)
            atexit.register(connect.Disconnect, self.server_instance)
        except IOError as ex:
            raise SystemExit("unable to connect to vCenter / ESXi host..")

    def get_server_instance(self):
        return self.server_instance

    def get_content(self):
        return self.server_instance.RetrieveContent()


"""
Encapsulates functions for identifying and managing the ESXi hosts
"""
class ESXiHostManager:

    def __init__(self):
        self.hosts = dict()

    def get_hosts(self, content, hostnames=[]):
        self.hosts = dict()
        host_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True)
        ## Here we keep a class-local reference to the hosts, filtering for specific hosts if defined in "hostnames"
        for host in host_view.view:
            if len(hostnames) == 0:
                self.hosts[host.name] = host
            else:
                for hostname in hostnames:
                    if (host.name == hostname):
                        self.hosts[host.name] = host
        return self.hosts

    def shutdown_hosts(self, forced_shutdown=False):
        tasks = list()
        for key, host in self.hosts.items():
            logger.info("Host {} is now being shutdown".format(key))
            task = host.ShutdownHost_Task(forced_shutdown)
            tasks.append(task)
        return tasks


"""
Encapsulates functions for identifying and managing the VMs on ESXi hosts
"""
class ESXiVMManager:

    def __init__(self):
        self.vms = dict()

    def get_vms(self, hosts):
        self.vms = dict()
        for key, host in hosts.items():
            for vm in host.vm:
                self.vms[vm.name] = vm
        return self.vms

    def set_note(self, vm, message):
        spec = vim.vm.ConfigSpec()
        spec.annotation = message
        task = vm.ReconfigVM_Task(spec)
        return task

    def suspend_vms(self):
        task_list = list()
        for key, vm in self.vms.items():
            logger.info("VM {} is currently {}".format(key, vm.runtime.powerState))
            if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                logger.info("VM {} is being suspended".format(key))
                note_task = self.set_note(vm, "SCHEDULED_SUSPEND")
                task.WaitForTask(note_task)
                vm_task = vm.SuspendVM_Task()
                task_list.append(vm_task)
        return task_list

    def unsuspend_vms(self):
        task_list = list()
        for key, vm in self.vms.items():
            logger.info("VM {} is currently {}".format(key, vm.runtime.powerState))
            if vm.runtime.powerState == vim.VirtualMachinePowerState.suspended:
                logger.info("VM {} is being unsuspended".format(key))
                note_task = self.set_note(vm, "SCHEDULED_UNSUSPEND")
                task.WaitForTask(note_task)
                vm_task = vm.PowerOnVM_Task()
                task_list.append(vm_task)
        return task_list


"""
IPMI access by making subprocess calls to the ipmitool executable
"""
class IPMIManager:
    POWER_OFF = "Off"
    POWER_ON = "On"
    POWER_UP = "Up/On"

    def __init__(self, host, username, password):
        self.host = host
        self.username = username
        self.password = password

    def check_powered_on_result(self, result):
        result = str(result).lower()
        if result == self.POWER_ON.lower():
            return True
        elif result == self.POWER_UP.lower():
            return True
        else:
            return False

    def is_powered_on(self):
        result = self.get_power_status()
        return self.check_powered_on_result(result)

    def power_response_to_str(self, result):
        result = str(result)
        if result.lower() == self.POWER_UP.lower():
            return "Powering UP"
        elif result.lower() == self.POWER_ON.lower():
            return "Power is ON"
        elif result.lower() == self.POWER_OFF.lower():
            return "Power is OFF"
        else:
            return "Power is UNKNOWN"

    def get_power_status(self):
        try:
            cmd = "ipmitool -I lanplus -H {} -U {} -P {} chassis power status".format(self.host, self.username, self.password)
            result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stderr = result.stderr.decode('UTF-8')
            stdout = result.stdout.decode('UTF-8')
            if stderr != "":
                raise Exception("Call to IPMI failed (check credentials?), " + stderr)
            pattern = "Chassis Power is (?P<status>.*)"
            result = re.search(pattern, stdout)
            return result.group("status")
        except subprocess.CalledProcessError as e:
            raise Exception("Unable to get power status, error was returned: " + e.output)

    def power_on(self):
        try:
            cmd = "ipmitool -I lanplus -H {} -U {} -P {} chassis power on".format(self.host, self.username, self.password)
            result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stderr = result.stderr.decode('UTF-8')
            stdout = result.stdout.decode('UTF-8')
            if stderr != "":
                raise Exception("Call to IPMI failed (check credentials?), " + stderr)
            logger.info("IPMI power ON operation, responded with " + stdout)
            pattern = ": (?P<status>.*)"
            result = re.search(pattern, stdout)
            status = self.check_powered_on_result(result.group("status"))
            return status
        except subprocess.CalledProcessError as e:
            raise Exception("Error unable to power on, the following error occured: " + e.output)


"""
A utility for loading the config file, extracting the user-defined settings,
and ensuring that mandatory values are present
"""
class ConfigManager:

    def __init__(self, path):
        """
        Initiate the class, reset all values to default.
        """
        self.config_path = path
        self.hosts = dict()

    def read(self):
        mandatory_fields = ["enabled", "esxi_host", "esxi_username", "esxi_password", "esxi_webport", "ipmi_enabled"]

        if os.path.exists(self.config_path):
            config_parser = configparser.ConfigParser()
            config_parser.read(self.config_path)

            host_no = 1
            section_name = "HOST"
            while config_parser.has_section(section_name + str(host_no)):
                host = dict(config_parser.items(section_name + str(host_no)))

                for field in mandatory_fields:
                    if field not in host:
                        raise Exception("Mandatory config field %s not found in section [HOST%s]" % (field, host_no))
                    if len(host[field]) == 0:
                        raise Exception("Mandatory config field %s has no VALUE in section [HOST%s]" % (field, host_no))

                if host["enabled"] == "True":
                    host["enabled"] = True
                elif host["enabled"] == "False":
                    host["enabled"] = False
                else:
                    raise Exception("[HOST%s] setting 'enabled' is invalid, must be True or False (case sensitive)" % (host_no))

                if host["ipmi_enabled"] == "True":
                    host["ipmi_enabled"] = True
                elif host["ipmi_enabled"] == "False":
                    host["ipmi_enabled"] = False
                else:
                    raise Exception("[HOST%s] setting 'ipmi_enabled' is invalid, must be True or False (case sensitive)" % (host_no))

                esxi_host = host["esxi_host"]
                self.hosts[esxi_host] = host
                host_no += 1
        else:
            raise Exception("Config file does not exist, cannot find " + self.config_path)


class WebManager:

    def __init__(self, fqdn):
        self.fqdn = fqdn

    def test_web_server(self, delay, attempts):
        url = "https://{}/ui/#/login".format(self.fqdn)
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        result = None

        for count in range(attempts):
            try:
                result = requests.head(url, verify=False, timeout=5)
                ## If we get a valid response... great
                if result.status_code == 200:
                    logger.info("{} is NOW accessible, on attempt {}, total delay was {} seconds".format(url, count + 1, count * delay))
                    logger.info("Allowing WebUI time to complete initialisation, delaying {} more seconds".format(delay))
                    time.sleep(delay)
                    return True
            ## An exception is going to happen everytime a request fails, such as timeout or rejected connection
            except requests.ConnectionError as e:
                logger.info("{} is not responding, please wait - attempt {} of {}, delaying {} seconds".format(url, count + 1, attempts, delay))
            finally:
                time.sleep(delay)

            ## If we're post exception, there will be no response, so nothing to do... else only log a message
            ## if we didn't see a 200... this stops the message showing on function exit
            if result is not None:
                if result.status_code != 200:
                    logger.info("{} is responding with HTTP {} - attempt {} of {}, delaying {} seconds".format(url, result.status_code, count + 1, attempts, delay))
                result.close()

        return False


def do_power_on_vms(host_config):
    connection_mgr = ESXiConnectionManager(host_config["esxi_host"], host_config["esxi_username"],
                                           host_config["esxi_password"], host_config["esxi_webport"])
    host_mgr = ESXiHostManager()
    vm_mgr = ESXiVMManager()
    task_mgr = ESXiTaskManager()

    hosts = host_mgr.get_hosts(connection_mgr.get_content())

    ## Process and unpause VMs
    vms = vm_mgr.get_vms(hosts)
    task_list = vm_mgr.unsuspend_vms()
    task_mgr.add_task_list(task_list)
    task_mgr.wait(True)
    task_mgr.clear()


def get_config_for_host(esxi_host):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    config = ConfigManager(dir_path + "/conf/esxi-hosts.conf")
    config.read()

    if esxi_host not in config.hosts:
        raise Exception("Host '{}' not found in the esxi-hosts.conf".format(esxi_host))

    host_config = config.hosts[esxi_host]
    if host_config["enabled"] == False:
        raise Exception("Unable to perform action, host '{}' is DISABLED in the esxi-hosts.conf".format(esxi_host))

    return host_config


def get_power_status(esxi_host):
    logger.info("Loading config")
    host_config = get_config_for_host(esxi_host)

    if host_config["ipmi_enabled"] == True:
        ipmi = IPMIManager(host_config["ipmi_host"], host_config["ipmi_username"], host_config["ipmi_password"])
        result = ipmi.get_power_status()
        logger.info("Current power status is: " + ipmi.power_response_to_str(result))
        return ipmi.check_powered_on_result(result)


def do_shutdown(esxi_host):
    logger.info("Loading config")
    host_config = get_config_for_host(esxi_host)

    connection_mgr = ESXiConnectionManager(host_config["esxi_host"], host_config["esxi_username"],
                                           host_config["esxi_password"], host_config["esxi_webport"])
    host_mgr = ESXiHostManager()
    vm_mgr = ESXiVMManager()
    task_mgr = ESXiTaskManager()

    hosts = host_mgr.get_hosts(connection_mgr.get_content())
    vms = vm_mgr.get_vms(hosts)
    task_list = vm_mgr.suspend_vms()
    task_mgr.add_task_list(task_list)
    task_mgr.wait(True)
    task_mgr.clear()

    ## Process Host...
    task_list = host_mgr.shutdown_hosts(True)
    task_mgr.add_task_list(task_list)
    task_mgr.wait(True)


def do_power_on_host(host_config):
    ## IPMI Power On Block
    logger.info("Begin IPMI processing")
    if host_config["ipmi_enabled"] == True:
        logger.info("Sending IPMI power on to " + host_config["esxi_host"])
        ipmi = IPMIManager(host_config["ipmi_host"], host_config["ipmi_username"], host_config["ipmi_password"])
        result = ipmi.power_on()
        return result
    else:
        logger.info("IPMI is not enabled for {}".format(host_config["esxi_host"]))
        return False


def do_poweron(esxi_host):
    logger.info("Loading config")
    host_config = get_config_for_host(esxi_host)

    do_power_on_host(host_config)

    logger.info("Beginning checks for WebUI on " + host_config["esxi_host"])
    web_check = WebManager(host_config["esxi_host"])
    result = web_check.test_web_server(5, 100)
    if result == False:
        raise Exception("Unable to boot VMs, the WebUI on host '{}' did not respond in time!".format(esxi_host))

    ## VM Power On Block
    do_power_on_vms(host_config)
    ## End VM


def main():
    parser = argparse.ArgumentParser("ESXi/BMC Power Manager")
    parser.add_argument("host", help="Name of the ESXi to perform operation on (must match esxi_host in the esxi-host.conf)", action="store")
    parser.add_argument("operation", choices=["up", "down", "status"], help="type of power operation to perform")
    parser.add_argument("-verbose", "--verbose", help="Output info and debug information, very useful for finding config problems", action='store_true')
    args = parser.parse_args()

    if args.verbose == True:
        logger.set_enabled(True)

    host = args.host
    operation = args.operation

    try:
        if operation == "up":
            logger.info("Received POWER UP for {}".format(host))
            do_poweron(host)
        elif operation == "down":
            logger.info("Received POWER DOWN for {}".format(host))
            do_shutdown(host)
        elif operation == "status":
            logger.info("Received POWER STATUS for {}".format(host))
            result = get_power_status(host)
            print(result)
    except Exception as e:
        logger.info("Exception: " + str(e))
        logger.info("Exception Trace: " + traceback.format_exc())
        print("Error: " + str(e))


if __name__ == "__main__":
    logger.setup()
    main()
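# Example invocations for the script above, as implied by its argparse setup
# ("esxi01.lan" is a hypothetical host name; it must match an esxi_host entry
# in conf/esxi-hosts.conf):
#
#     ./vm-power-mgr.py esxi01.lan up --verbose      # IPMI power-on, wait for WebUI, resume VMs
#     ./vm-power-mgr.py esxi01.lan down --verbose    # suspend VMs, then shut the host down
#     ./vm-power-mgr.py esxi01.lan status            # print True/False chassis power state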
UTF-8
Python
false
false
17,700
py
2
vm-power-mgr.py
1
0.596723
0.594633
0
515
33.370874
168
mlautman/NN_MNEST
9,105,330,697,142
9995d32d752a0da0eb65af3b9e6f2500397f3e21
f8190ab585debc67632dea51fc66e9b52fb894ed
/src/im_t_wrap_bu.py
94f6ddb3b2cfeda8116781c4fbdc5629d3aa16e3
[]
no_license
https://github.com/mlautman/NN_MNEST
a72ba2ab2eba25fbcb4c26d9696bd9c5b5b99888
38a59cb8de9bf4b6287d6e45737c1f48f3d9a2a1
refs/heads/master
2020-05-20T12:19:06.509973
2014-04-25T18:28:32
2014-04-25T18:28:32
18,905,714
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import division

import numpy as np
from math import sqrt

from src.show_images import show_image_np

# Things we want to output while we run the algo
#   sum of get_z_delta for each run (sequential)
#   image of get_z_delta
#   Image animation as gif.


def _initial_z_t(z_shape):
    return np.matrix([0] * z_shape[0] * z_shape[1])


def _initial_mew():
    return 1


def _update_mew(mew_o):
    return (1 + sqrt(1 + 4 * mew_o**2)) / 2


def _gamma(mew_n, mew_o):
    return (mew_o - 1) / mew_n


def solve_for_z_t(W_T, y_t):
    return y_t * np.linalg.pinv(W_T)


def toggle(a):
    return 1 if a == 0 else 0


class Image_t:
    def __init__(self, label, X, Y, Z, lambda1=.1):
        self.label = label
        self.lambda1 = lambda1
        self.X = X
        self.Y = Y
        self.Z = Z
        self.Z_o = Z
        self.mew_o = [None] * X.shape[0]
        self.mew_n = [_initial_mew()] * X.shape[0]

    def update_lambda(self, lambda1):
        self.lambda1 = lambda1

    def _compute_gamma(self, index):
        self.mew_o = self.mew_n
        self.mew_n = _update_mew(self.mew_n)
        return _gamma(self.mew_n, self.mew_o)

    def _S_lambda(self, u):
        u = u[0, :] / max(u.max(), abs(u.min()))
        for i, v in enumerate(u.tolist()[0]):
            if (v > 0) and (v > self.lambda1):
                u[0, i] = (v - self.lambda1 * v)
            elif (v < 0) and (v < -self.lambda1):
                u[0, i] = (v - self.lambda1 * v)
            else:
                u[0, i] = 0
        return u

    def _hangman(self, u, M):
        return self._S_lambda(
            u - (self.y - u * M) * np.linalg.pinv(M)
        )

    def update_z_t(self, W, S):
        M = W * S
        Z_n = self.Z
        z_o_i = toggle(self.z_new)
        gamma = self._compute_gamma()
        self.z[z_o_i] = self._hangman(
            self.z[z_n_i] + gamma * (self.z[z_n_i] - self.z[z_o_i]),
            M
        )
        self.z_new = toggle(self.z_new)

    def get_z(self):
        return self.z[self.z_new]

    def get_z_delta(self):
        return self.z[self.z_new] - self.z[toggle(self.z_new)]

    def estimate_x(self, W):
        return self.get_z() * W

    def show_x_estimate(self, W):
        show_image_np(self.estimate_x(W))

    def get_z_t_delta(self):
        return self.z[self.z_new] - self.z[self.z_new]
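# _S_lambda above is a normalised shrinkage (soft-threshold-style) operator of
# the kind used as the proximal step in ISTA/FISTA sparse coding; a vectorised
# sketch of the same multiplicative variant (illustrative, not from the
# original file):
#
#     import numpy as np
#
#     def shrink(u, lam):
#         """Scale to [-1, 1], zero entries with |u_i| <= lam, shrink the rest."""
#         u = u / np.abs(u).max()                       # normalise as in _S_lambda
#         return np.where(np.abs(u) > lam, u * (1 - lam), 0.0)
#
# Note the classical soft threshold would instead subtract lam
# (sign(u) * max(|u| - lam, 0)); this file multiplies by (1 - lambda1).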
UTF-8
Python
false
false
2,339
py
17
im_t_wrap_bu.py
14
0.519025
0.504489
0
98
22.867347
68
opennorth/open511
12,421,045,444,391
80c255c1f984223d927fd7f0ceb80cadb587e4dd
91d950c5522baf7b47ca113d413c4cb35ec9a007
/open511/utils/serialization.py
7a93d15f1dcadad040f52350824361144901ab67
[ "MIT" ]
permissive
https://github.com/opennorth/open511
7f3bde31746717450dc73e2873d9e8d2cc533874
3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8
refs/heads/master
2020-06-05T00:55:18.333600
2015-11-17T23:33:20
2015-11-17T23:34:30
12,960,085
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json

from lxml import etree

XML_LANG = '{http://www.w3.org/XML/1998/namespace}lang'
XML_BASE = '{http://www.w3.org/XML/1998/namespace}base'

GML_NS = NS_GML = 'http://www.opengis.net/gml'
NS_KML = 'http://www.opengis.net/kml/2.2'
NS_PROTECTED = 'http://open511.org/namespaces/internal-field'
NS_ATOM = "http://www.w3.org/2005/Atom"
NS_AGE = "http://purl.org/atompub/age/1.0"
NS_XHTML = 'http://www.w3.org/1999/xhtml'
NS_GEORSS = 'http://www.georss.org/georss'

NSMAP = {
    'gml': NS_GML,
    'protected': NS_PROTECTED,
    'kml': NS_KML,
    'atom': NS_ATOM,
    'age': NS_AGE,
    'html': NS_XHTML,
    'georss': NS_GEORSS
}


def get_base_open511_element(lang=None, base=None, version=None):
    elem = etree.Element("open511", nsmap={
        'gml': NS_GML,
    })
    if lang:
        elem.set(XML_LANG, lang)
    if base:
        elem.set(XML_BASE, base)
    if version:
        elem.set('version', version)
    return elem


def make_link(rel, href):
    l = etree.Element('link')
    l.set('rel', rel)
    l.set('href', href)
    return l


def is_tmdd(doc):
    # Does a given etree Element represent a TMDD document?
    return doc.tag != 'open511' and bool(doc.xpath('//FEU'))


def deserialize(s):
    s = s.strip()
    try:
        doc = etree.fromstring(s)
        if is_tmdd(doc):
            # Transparently convert the TMDD on deserialize
            from ..converter.tmdd import tmdd_to_json
            return (tmdd_to_json(doc), 'json')
        return (doc, 'xml')
    except etree.XMLSyntaxError:
        try:
            return (json.loads(s), 'json')
        except ValueError:
            raise Exception("Doesn't look like either JSON or XML")


def serialize(obj):
    if getattr(obj, 'tag', None):
        return etree.tostring(obj, pretty_print=True)
    return json.dumps(obj, indent=4)
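# A small round-trip sketch for deserialize()/serialize() above (illustrative,
# not part of the original file): XML input comes back as an lxml Element and
# JSON input as a plain dict, and serialize() dispatches on the object's type.
if __name__ == "__main__":
    doc, doc_format = deserialize('{"meta": {"version": "v1"}}')
    assert doc_format == 'json'
    print(serialize(doc))           # pretty-printed JSON

    xml_doc, xml_format = deserialize(b'<open511 version="v1"></open511>')
    assert xml_format == 'xml'
    print(serialize(xml_doc))       # pretty-printed XML bytes via lxml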
UTF-8
Python
false
false
1,823
py
32
serialization.py
19
0.606692
0.586396
0
65
27
67