Dataset column schema (name: dtype, observed range or class count):

repo_name:             stringlengths, 7 to 111
__id__:                int64, 16.6k to 19,705B
blob_id:               stringlengths, 40 to 40
directory_id:          stringlengths, 40 to 40
path:                  stringlengths, 5 to 151
content_id:            stringlengths, 40 to 40
detected_licenses:     list
license_type:          stringclasses, 2 values
repo_url:              stringlengths, 26 to 130
snapshot_id:           stringlengths, 40 to 40
revision_id:           stringlengths, 40 to 40
branch_name:           stringlengths, 4 to 42
visit_date:            timestamp[ns]
revision_date:         timestamp[ns]
committer_date:        timestamp[ns]
github_id:             int64, 14.6k to 687M
star_events_count:     int64, 0 to 209k
fork_events_count:     int64, 0 to 110k
gha_license_id:        stringclasses, 12 values
gha_fork:              bool, 2 classes
gha_event_created_at:  timestamp[ns]
gha_created_at:        timestamp[ns]
gha_updated_at:        timestamp[ns]
gha_pushed_at:         timestamp[ns]
gha_size:              int64, 0 to 10.2M
gha_stargazers_count:  int32, 0 to 178k
gha_forks_count:       int32, 0 to 88.9k
gha_open_issues_count: int32, 0 to 2.72k
gha_language:          stringlengths, 1 to 16
gha_archived:          bool, 1 class
gha_disabled:          bool, 1 class
content:               stringlengths, 10 to 2.95M
src_encoding:          stringclasses, 5 values
language:              stringclasses, 1 value
is_vendor:             bool, 2 classes
is_generated:          bool, 2 classes
length_bytes:          int64, 10 to 2.95M
extension:             stringclasses, 19 values
num_repo_files:        int64, 1 to 202k
filename:              stringlengths, 4 to 112
num_lang_files:        int64, 1 to 202k
alphanum_fraction:     float64, 0.26 to 0.89
alpha_fraction:        float64, 0.2 to 0.89
hex_fraction:          float64, 0 to 0.09
num_lines:             int32, 1 to 93.6k
avg_line_length:       float64, 4.57 to 103
max_line_length:       int64, 7 to 931
----------------------------------------
repo_name: FanciestW/UFO-Game
__id__: 2,001,454,802,555
blob_id: 65f3a77ba799eae077b8b71b7b04d065989eb2cd
directory_id: 0a68e1311fed737738bf7ae806414d790967513f
path: /src/ufo_game/animation.py
content_id: 6799f20bfd39430ce3d050f90c26b4852cc147bf
detected_licenses: []   license_type: no_license
repo_url: https://github.com/FanciestW/UFO-Game
snapshot_id: a44fb593ae4ea0f3ac31ebbf561dbf6c34a0a6cd
revision_id: 807bbfa0e3cce17d2c7661f61366150c5cd16875
branch_name: refs/heads/master
visit_date: 2020-04-18T14:48:40.644483
revision_date: 2019-03-03T01:52:25   committer_date: 2019-03-03T01:52:25
github_id: 167,598,427   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

import time
import os
from . import ufo_animation

def ufo_win():
    for frame in ufo_animation.win:
        os.system('cls' if os.name == 'nt' else 'clear')
        print(frame)
        time.sleep(0.15)

def ufo_lose():
    for frame in ufo_animation.lose:
        os.system('cls' if os.name == 'nt' else 'clear')
        print(frame)
        time.sleep(0.15)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 361   extension: py   num_repo_files: 12   filename: animation.py   num_lang_files: 9
alphanum_fraction: 0.592798   alpha_fraction: 0.576177   hex_fraction: 0
num_lines: 15   avg_line_length: 23.133333   max_line_length: 56
----------------------------------------
repo_name: lastmayday/Euler
__id__: 8,478,265,452,857
blob_id: 6c664f6a38f0bc736f998f22fec707f382a58fa3
directory_id: 7a4f32606abf3cccab38764f0c04cb4b50007ebb
path: /32.py
content_id: 1cecc8a573e6a1cfb5ece91079b3a95896176d7d
detected_licenses: []   license_type: no_license
repo_url: https://github.com/lastmayday/Euler
snapshot_id: 8d464fb4b99d96aa43d2175b5851af22338ab51a
revision_id: 297638039105c99a4765061378c8e9d6b1d76dbb
branch_name: refs/heads/master
visit_date: 2021-01-10T19:22:24.291197
revision_date: 2013-07-28T16:03:12   committer_date: 2013-07-28T16:03:12
github_id: 6,731,722   star_events_count: 1   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

#-*- coding:utf-8 -*-

def isPandigital(s):
    length = len(s)
    if length > 9:
        return False
    for i in xrange(1, length+1):
        if str(i) not in s:
            return False
    return True

def pandigital(a, b):
    num = str(a) + str(b) + str(a*b)
    if len(num) != 9:
        return False
    return isPandigital(num)

def main():
    res = []
    for a in xrange(0, 100000):
        for b in xrange(0, 100000):
            if len(str(a*b) + str(a) + str(b)) > 9:
                break
            if pandigital(a, b):
                res.append(a*b)
                print a, "*", b, "=", a*b
    print sum(set(res))

if __name__ == "__main__":
    main()

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 675   extension: py   num_repo_files: 39   filename: 32.py   num_lang_files: 39
alphanum_fraction: 0.466667   alpha_fraction: 0.437037   hex_fraction: 0
num_lines: 33   avg_line_length: 19.454545   max_line_length: 51
----------------------------------------
repo_name: nihilus/idapatchwork
__id__: 8,761,733,288,010
blob_id: 79b6d1bff57dded2237b520d542165f23043fb50
directory_id: 575eb07b208e17c8f9a7ef4e600c2ce0eb323b02
path: /examples/idapyemu_loop_code_segment.py
content_id: a2c9d6dcf92cc10a65a044e99ff8503bb0cc3f7a
detected_licenses: []   license_type: no_license
repo_url: https://github.com/nihilus/idapatchwork
snapshot_id: ffa425f0fd66fca098306534ab5cc1c2e49ae184
revision_id: 9a2f6519bb625d2fc32c289f9b7164e8e09ddcd0
branch_name: refs/heads/master
visit_date: 2021-01-18T17:17:56.394784
revision_date: 2015-09-08T20:54:52   committer_date: 2015-09-08T20:54:52
github_id: 42,138,430   star_events_count: 2   fork_events_count: 1
gha_* (all 13 GHA fields): null
content:

#!/usr/bin/env python

import sys, os, time, struct, re, string

# !!! set your pyemu path plz2u !!!
sys.path.append(r'C:\Code\Python\pyemu')
sys.path.append(r'C:\Code\Python\pyemu\lib')

from PyEmu import *

textstart = SegByName(".text")
textend = SegEnd(textstart)

emu = IDAPyEmu()

print "[*] Loading text section bytes into memory"

currenttext = textstart
while currenttext <= textend:
    emu.set_memory(currenttext, GetOriginalByte(currenttext), size=1)
    currenttext += 1

print "[*] Text section loaded into memory"

for x in range(0, 20):
    emu.set_register("EIP", ScreenEA())
    emu.set_stack_argument(0x4, x)
    emu.execute(end=0xdeafc0de)
    print emu.get_register("EAX")

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 703   extension: py   num_repo_files: 40   filename: idapyemu_loop_code_segment.py   num_lang_files: 39
alphanum_fraction: 0.688478   alpha_fraction: 0.674253   hex_fraction: 0
num_lines: 31   avg_line_length: 21.709677   max_line_length: 69
----------------------------------------
repo_name: IamConstantine/LeetCodeFiddle
__id__: 18,837,726,593,478
blob_id: ea7d957ea70cbb79c4463a3fc3a443ed7089893e
directory_id: 670c844e5cfa1cdf11212cc53972ecd8f7a25949
path: /python/ShortestPathLength.py
content_id: fe183b49f1dab1c7e6b2a9a06446706055a79b09
detected_licenses: []   license_type: no_license
repo_url: https://github.com/IamConstantine/LeetCodeFiddle
snapshot_id: 74d580a0741e40397f1283beadd023f9b9323abd
revision_id: 5ec509505a394d53517fb005bbeb36745f06596a
branch_name: refs/heads/master
visit_date: 2022-05-31T05:25:14.273605
revision_date: 2022-05-23T02:46:47   committer_date: 2022-05-23T02:46:47
github_id: 100,910,327   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

# https://leetcode.com/problems/shortest-path-visiting-all-nodes
# Hard
# T = O(2**N * N ** 2) - 2 ** N - possible mask combinations,
# N ** 2 - worst case each node is connected to all other nodes
#
# S = O(2 ** N * N)
def shortestPathLength(graph):
    n = len(graph)
    cache = {}
    ending_mask = (1 << n) - 1  # acts as visited bit map

    def dp(node, mask):
        state = (node, mask)
        if state in cache:
            return cache[state]
        # termination condition
        if mask & (mask - 1) == 0:  # only 1 bit is set
            return 0
        cache[state] = float('inf')
        for neighbour in graph[node]:
            if mask & (1 << neighbour):
                # don't mark visited, as we are allowed to revisit multiple times
                not_visited = 1 + dp(neighbour, mask)
                visited = 1 + dp(neighbour, mask ^ (1 << node))
                cache[state] = min(cache[state], visited, not_visited)
        return cache[state]

    return min(dp(x, ending_mask) for x in range(n))

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 1,022   extension: py   num_repo_files: 313   filename: ShortestPathLength.py   num_lang_files: 309
alphanum_fraction: 0.558708   alpha_fraction: 0.544031   hex_fraction: 0
num_lines: 32   avg_line_length: 30.9375   max_line_length: 119
----------------------------------------
repo_name: lukehuang/appomatic
__id__: 13,202,729,484,361
blob_id: b25bc91f54872c984efbcd633099e07293d236ef
directory_id: cfe03aecedaa085ffb63a376a1471b7805aec422
path: /appomaticcore/appomatic/settings.py
content_id: 9b5950a4b243e9d1cc23e064333c12e1a8731af9
detected_licenses: []   license_type: no_license
repo_url: https://github.com/lukehuang/appomatic
snapshot_id: 5ce46a33f2889ed638569af1d1d9c93f914249fc
revision_id: 4e3cbf9ca43a6421f3953ca667607b1b1b0a28d2
branch_name: refs/heads/master
visit_date: 2021-01-23T05:56:08.864264
revision_date: 2017-08-08T12:46:44   committer_date: 2017-08-08T12:46:44
github_id: null   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

# -*- coding: utf-8 -*-
import os
import os.path
import sys
import itertools
import appomatic.utils.app

VIRTUALENV_DIR = os.environ['VIRTUAL_ENV']
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
APP_DIR = os.path.join(VIRTUALENV_DIR, "apps")
sys.path.append(APP_DIR)

if os.environ.keys().count("PYTHONPATH") == 0:
    os.environ["PYTHONPATH"] = ""

def get_app_config_list(config_name):
    return tuple(value
                 for value in itertools.chain.from_iterable(
                     app.get(config_name, []) for app in APPOMATIC_APP_PARTS))

APPOMATIC_APPS = appomatic.utils.app.load_apps(
    list(appomatic.utils.app.get_pip_apps())
    + list(appomatic.utils.app.get_dir_apps(APP_DIR)))
APPOMATIC_APP_PARTS = appomatic.utils.app.sort_apps(APPOMATIC_APPS)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(VIRTUALENV_DIR, 'appomatic.db'),
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(VIRTUALENV_DIR, 'media')
if not os.path.exists(MEDIA_ROOT):
    os.makedirs(MEDIA_ROOT)

STATIC_ROOT = os.path.join(VIRTUALENV_DIR, 'static')
if not os.path.exists(STATIC_ROOT):
    os.makedirs(STATIC_ROOT)

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'

STATICFILES_DIRS = get_app_config_list('STATICFILES_DIRS')

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'gig3(ofdyzr_g*lj-%uzh&k3ct2_y1cgh4h5321*xf8fnybd%k'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
) + get_app_config_list('MIDDLEWARE_CLASSES')

import django
if django.VERSION[0] >= 1 and django.VERSION[1] >= 4:
    TEMPLATE_CONTEXT_PROCESSOR_AUTH = 'django.contrib.auth.context_processors.auth'
else:
    TEMPLATE_CONTEXT_PROCESSOR_AUTH = 'django.core.context_processors.auth'

TEMPLATE_CONTEXT_PROCESSORS = (
    TEMPLATE_CONTEXT_PROCESSOR_AUTH,
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
) + get_app_config_list('TEMPLATE_CONTEXT_PROCESSORS')

ROOT_URLCONF = 'appomatic.urls'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles'
) + get_app_config_list('INSTALLED_APPS')

for part in APPOMATIC_APP_PARTS:
    p = os.path.join(part['PATH'], '__settings__.py')
    if os.path.exists(p):
        with open(p) as f:
            exec f

if DEBUG:
    print "Installed apps: " + ', '.join(app['NAME'] for app in APPOMATIC_APPS)
    print " Parts: " + ', '.join(part['NAME'] for part in APPOMATIC_APP_PARTS)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 4,690   extension: py   num_repo_files: 56   filename: settings.py   num_lang_files: 48
alphanum_fraction: 0.705544   alpha_fraction: 0.7   hex_fraction: 0
num_lines: 143   avg_line_length: 31.797203   max_line_length: 92
----------------------------------------
repo_name: fcchou/tensorflow_models
__id__: 13,469,017,454,585
blob_id: 65a73621413b9cf3892efef6c37946c8279a05d0
directory_id: 1baaa57322c398addbf912f97511d3b150a31fc8
path: /tensorflow_models/rbm.py
content_id: 5bf2fd6165fc95a2b135f9c3b8b77cea113da1aa
detected_licenses: []   license_type: no_license
repo_url: https://github.com/fcchou/tensorflow_models
snapshot_id: 45f8c6373b4054e6f9eee35818c516c450926674
revision_id: 57246ae6ac504e601eef872681950cb2b74f6b23
branch_name: refs/heads/master
visit_date: 2020-12-03T00:07:46.018218
revision_date: 2017-08-01T06:49:02   committer_date: 2017-08-01T06:49:02
github_id: 95,992,536   star_events_count: 1   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

import tensorflow as tf
import numpy as np

from tensorflow_models.util import MiniBatchData


class RBM:
    def __init__(
            self,
            rbm_layer,
            learning_rate=0.001,
            n_iter=5000,
            batch_size=100,
            negative_sample_size=100,
            regularization=0.00001,
            cd_k=1,
            session=None,
    ):
        """Restricted Boltzmann Machine by Tensorflow.

        Both the visible and hidden layers are binary units. The training is
        performed with the standard contrastive divergence (CD). In addition,
        the persistent-CD algorithm is used.

        Args:
            rbm_layer (RBMLayer): RBMLayer that defines the RBM structure
            learning_rate (float): Learning rate of the gradient descent.
            n_iter (int): Number of gradient descent iterations.
            batch_size (int): Size of the mini batch.
            negative_sample_size (int): Number of negative sample particles to
                keep during CD for each iteration.
            regularization (float): Strength of the L2 regularization. Use 0 to
                skip regularization.
            cd_k (int): Number of CD steps to perform.
            session (tf.Session): A Tensorflow session. If not given, creates a
                new session and initialize.
        """
        curr_vars = set(tf.global_variables())

        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.batch_size = batch_size
        self._regularization = regularization
        self._rbm_layer = rbm_layer

        self._x = tf.placeholder(tf.float32, [None, self._rbm_layer.visible_size])  # Input units
        # Negative particles placeholder
        self._negative_v = tf.placeholder(tf.float32, [negative_sample_size, self._rbm_layer.visible_size])

        # Training and negative particle sampler
        self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self._cost_func())
        self._persisted_v_updater = self._rbm_layer.gibbs_sample(self._negative_v, cd_k)
        self._persisted_v = None

        if session is None:
            self._sess = tf.Session()
            self._sess.run(tf.global_variables_initializer())
        else:
            self._sess = session
            extra_vars = set(tf.global_variables()) - curr_vars
            self._sess.run(tf.variables_initializer(extra_vars))

    def _cost_func(self):
        cost = (
            tf.reduce_mean(self._rbm_layer.free_energy(self._x))
            - tf.reduce_mean(self._rbm_layer.free_energy(self._negative_v))
            + tf.nn.l2_loss(self._rbm_layer.weight) * self._regularization
        )
        return cost

    def reconstruction_error(self, input_x):
        """Compute the model reconstruction error as a proxy of the model accuracy.

        Args:
            input_x (array-like): Input data to be used for error estimation
                (e.g. validation set).

        Returns:
            The log-loss reconstruction error.
        """
        return self._sess.run(self._rbm_layer.reconstruction_error(input_x))

    def train(self, x):
        """Train the model.

        Args:
            x (numpy.ndarray): Feature vectors for the training set.
        """
        if self._persisted_v is None:
            # The negative particles (self._persisted_v) are initialized by running 20 steps of CD
            persisted_v = self._rbm_layer.gibbs_sample(
                tf.constant(
                    np.random.randint(0, 1, size=self._negative_v.get_shape().as_list()),
                    dtype=tf.float32,
                ),
                n_step=20,
            )
            self._persisted_v = self._sess.run(persisted_v)

        data = MiniBatchData([x], batch_size=self.batch_size)
        for _ in range(self.n_iter):
            batch_xs, = data.next()
            self._persisted_v = self._sess.run(
                self._persisted_v_updater,
                feed_dict={self._negative_v: self._persisted_v},
            )
            self._sess.run(self.train_step, feed_dict={self._x: batch_xs, self._negative_v: self._persisted_v})

    def reconstruct(self, input_x, n_step=1000):
        """Reconstruct the input vector by multi-step Gibbs sampling.

        Args:
            input_x (array-like): Input data to be reconstructed.
            n_step (int): Number of Gibbs sampling steps

        Returns:
            Reconstructed vectors, same shape as input_x.
        """
        return self._sess.run(self._rbm_layer.gibbs_sample(tf.constant(input_x, dtype=tf.float32), n_step))


class RBMLayer:
    def __init__(
            self,
            weight,
            hbias,
            vbias,
            name='rbm-layer',
    ):
        """Restricted Boltzmann Machine Layer.

        Both the visible and hidden layers are binary units.

        Args:
            weight (tf.Variable): Weights of RBM.
            hbias (tf.Variable): Bias on hidden units.
            vbias (tf.Variable): Bias on visible unit.
            name (str): Name of the RBM layer.

        Raises:
            ValueError if the weight is not of shape [visible_size, hidden_size]
        """
        self.visible_size = vbias.get_shape().as_list()[0]
        self.hidden_size = hbias.get_shape().as_list()[0]
        if weight.get_shape().as_list() != [self.visible_size, self.hidden_size]:
            raise ValueError('Incompatible weight and hbias/vbias')

        # Model parameters
        with tf.name_scope(name):
            self.vbias = vbias
            self.hbias = hbias
            self.weight = weight

    @classmethod
    def from_shape(cls, visible_size, hidden_size, name='rbm-layer'):
        """Construct an RBMLayer with visible_size and hidden_size.

        Both the visible and hidden layers are binary units.

        Args:
            visible_size (int): Size of the visible layer.
            hidden_size (int): Size of the hidden layer.
            name (str): Name of the RBM layer.
        """
        vbias = tf.Variable(tf.zeros([visible_size]))
        hbias = tf.Variable(tf.zeros([hidden_size]))
        weight_init_std = 4.0 * np.sqrt(6.0 / (visible_size + hidden_size))
        weight = tf.Variable(tf.truncated_normal([visible_size, hidden_size], stddev=weight_init_std))
        return cls(weight, hbias, vbias, name=name)

    def _visible2hidden(self, v):
        prob = tf.nn.sigmoid(tf.matmul(v, self.weight) + self.hbias)
        return self._sample_prob(prob), prob

    def _hidden2visible(self, h):
        prob = tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.weight)) + self.vbias)
        return self._sample_prob(prob), prob

    def _reconstruct_v(self, v):
        h, _ = self._visible2hidden(v)
        v_new, prob = self._hidden2visible(h)
        return v_new, prob

    def free_energy(self, input_v):
        """Compute free energy of an input visible vector given RBM parameters.

        Args:
            input_v (array-like): Input visible vector

        Returns (tf.Tensor): Free energy.
        """
        return -(
            tf.reduce_sum(input_v * self.vbias, 1)
            + tf.reduce_sum(tf.log(1 + tf.exp(tf.matmul(input_v, self.weight) + self.hbias)), 1)
        )

    def reconstruction_error(self, input_v):
        """Compute the reconstruction error as a proxy of the model accuracy.

        Args:
            input_v (tf.Tensor): Input data to be used for error estimation
                (e.g. validation set).

        Returns (tf.Tensor): The log-loss reconstruction error.
        """
        _, p = self._reconstruct_v(input_v)
        # The log-loss error diverges when p == 0 or p == 1. Apply a min/max for the probability
        p = tf.clip_by_value(p, 1e-5, 1 - 1e-5)
        return -tf.reduce_mean(input_v * tf.log(p) + (1 - input_v) * tf.log(1 - p))

    def gibbs_sample(self, input_v, n_step=1000):
        """Reconstruct the input vector by multi-step Gibbs sampling.

        Args:
            input_v (tf.Tensor): Input visible vector to be reconstructed.
            n_step (int): Number of Gibbs sampling steps

        Returns (tf.Tensor): Reconstructed vectors, same shape as input_x.
        """
        v = input_v
        for i in range(n_step):
            v, _ = self._reconstruct_v(v)
        return v

    @staticmethod
    def _sample_prob(probs):
        """Generate binary samples given probability"""
        return tf.to_float(probs > tf.random_uniform(probs.get_shape()))

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 8,380   extension: py   num_repo_files: 10   filename: rbm.py   num_lang_files: 9
alphanum_fraction: 0.591289   alpha_fraction: 0.583174   hex_fraction: 0
num_lines: 225   avg_line_length: 36.248889   max_line_length: 113
----------------------------------------
repo_name: willdickson/path_integration_multi_launch
__id__: 4,277,787,456,204
blob_id: 837cb7989a3b233f2b5b08ef9bfc744c34c8a6f8
directory_id: bd4d014dea9be849cc2a0e562a64adea258e64bf
path: /read_yaml.py
content_id: 3a76ac1343eb2ed942cb30dc62f76530a4f7c7f9
detected_licenses: []   license_type: no_license
repo_url: https://github.com/willdickson/path_integration_multi_launch
snapshot_id: 988909d2aa7e512c7cb6be0042fa966b9e795b76
revision_id: f1c52d1623f30bd37fca3949c4dbfe92a0076e52
branch_name: refs/heads/master
visit_date: 2020-04-27T19:52:29.169192
revision_date: 2019-03-22T23:21:53   committer_date: 2019-03-22T23:21:53
github_id: null   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

from __future__ import print_function
import sys
import yaml

filename = sys.argv[1]

with open(filename, 'r') as f:
    data = yaml.load(f)

print(data)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 153   extension: py   num_repo_files: 7   filename: read_yaml.py   num_lang_files: 2
alphanum_fraction: 0.69281   alpha_fraction: 0.686275   hex_fraction: 0
num_lines: 10   avg_line_length: 14.3   max_line_length: 37
----------------------------------------
repo_name: kamyh/master
__id__: 1,108,101,604,087
blob_id: 82cb8c7a9cb8785409839fa66ad60fdde771eb5a
directory_id: 4aaf635a69a68ee4a41f8e289a51a4d7fcda06ea
path: /documents/sources/exemple_3/multiprocess_2.py
content_id: 816c50f80f1bb45e957aa2a5a6b9908a12345739
detected_licenses: []   license_type: no_license
repo_url: https://github.com/kamyh/master
snapshot_id: 10c0f7c45763abd1af3c626be26a8114fce7f802
revision_id: b0ba6f846435edeebbaa2441f2a3a5205a9f3180
branch_name: refs/heads/master
visit_date: 2021-01-19T22:04:54.732802
revision_date: 2017-03-08T10:14:31   committer_date: 2017-03-08T10:14:31
github_id: 69,327,355   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

from multiprocessing import Pool

def f(x, y):
    return x + y

if __name__ == '__main__':
    p = Pool(processes=5)
    print(p.starmap(f, [[1, 2], [3, 4], [5, 6]]))

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 160   extension: py   num_repo_files: 100   filename: multiprocess_2.py   num_lang_files: 55
alphanum_fraction: 0.55   alpha_fraction: 0.50625   hex_fraction: 0
num_lines: 8   avg_line_length: 19.125   max_line_length: 44
----------------------------------------
repo_name: phatCoding/PiCameraConnector
__id__: 2,405,181,712,083
blob_id: 941918f69bcfe27f11c93e8dd00f7d97fedf4d73
directory_id: 85a5dfc3a9c03fb2e810479aa3c5b93fc93b257b
path: /BrowserStream.py
content_id: d58467b042d0693fa596a500d53c068876c60ed9
detected_licenses: []   license_type: no_license
repo_url: https://github.com/phatCoding/PiCameraConnector
snapshot_id: 0aff9274051ef74408aa7264a7ef2a993a9f4e49
revision_id: 72b089ac4bbcf5204aabc8f764f335017ec01a9a
branch_name: refs/heads/master
visit_date: 2023-07-05T22:05:31.873633
revision_date: 2021-08-01T14:14:08   committer_date: 2021-08-01T14:14:08
github_id: 390,832,735   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

import cv2, socket
import numpy as np
import base64
import mediapipe as mp
from flask import Flask, render_template, Response

app = Flask(__name__)

BUFF_SIZE = 65536
UDP_IP_ADDRESS = '192.168.178.171'
UDP_PORT_NO = 6789

msg = "Desk".encode()
bytearray = bytearray(msg)

clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFF_SIZE)
clientSock.sendto(bytearray, (UDP_IP_ADDRESS, UDP_PORT_NO))

mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDraw = mp.solutions.drawing_utils


def gen_frames():
    while True:
        try:
            packet, _ = clientSock.recvfrom(BUFF_SIZE)
            data = base64.b64decode(packet, ' /')
            npdata = np.frombuffer(data, dtype=np.uint8)
            frame = cv2.imdecode(npdata, 1)
            imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            res = hands.process(imgRGB)
            if res.multi_hand_landmarks:
                for handLms in res.multi_hand_landmarks:
                    mpDraw.draw_landmarks(frame, handLms, mpHands.HAND_CONNECTIONS)
            #cv2.imshow("recv video", frame)
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            # concat frame one by one and show result
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            key = cv2.waitKey(1) & 0xFF
        except Exception as err:
            print(err)
            clientSock.close()
            cv2.destroyAllWindows()
            break
        if key == 27:
            clientSock.close()
            cv2.destroyAllWindows()
            break

#######

@app.route('/')
def index():
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


# Run the app once all routes above are registered
if __name__ == "__main__":
    app.run(debug=True)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 1,944   extension: py   num_repo_files: 4   filename: BrowserStream.py   num_lang_files: 3
alphanum_fraction: 0.601852   alpha_fraction: 0.579733   hex_fraction: 0
num_lines: 71   avg_line_length: 26.394366   max_line_length: 116
----------------------------------------
repo_name: jorgeamendezm/3D-visual-body-scan
__id__: 17,282,948,410,540
blob_id: 91df48cc2062097682a77eca8f79f9eb070562f5
directory_id: 1637f7ae81a7813fb688dc43a02daff8d5fe03bd
path: /background.py
content_id: 417f29901dc2cb68014783704afa06ce04d4931c
detected_licenses: []   license_type: no_license
repo_url: https://github.com/jorgeamendezm/3D-visual-body-scan
snapshot_id: d3858e9dfd385cc31243fc7b11df359c1cf6e4fa
revision_id: eb32215e446c3e602b14bfdbff6242bdec00e1f8
branch_name: refs/heads/master
visit_date: 2021-01-12T17:36:13.781378
revision_date: 2016-10-23T00:47:29   committer_date: 2016-10-23T00:47:29
github_id: 71,614,919   star_events_count: 2   fork_events_count: 1
gha_* (all 13 GHA fields): null
content:

import cv2
import numpy as np
from matplotlib import pyplot as plt

f = cv2.imread("image000.jpg")
bg = np.float32(f)

det_obj = []
img = []

i = 5
while(i <= 241):
    title = "image%.3d.jpg" % (i)
    print title
    f = cv2.imread(title)
    img.append(f)
    f = np.float32(f)
    cv2.accumulateWeighted(f, bg, 0.15)
    res = f - bg
    res = cv2.convertScaleAbs(res)
    det_obj.append(res)
    #img.append(f)
    i += 5

# for f in det_obj:
#     cv2.imshow('saved', f)
#     cv2.waitKey(100)

print det_obj[20]

plt.subplot(1, 2, 1)
plt.imshow(cv2.Canny(det_obj[30], 50, 100))
plt.subplot(1, 2, 2)
plt.imshow(img[30])
plt.show()

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 638   extension: py   num_repo_files: 30   filename: background.py   num_lang_files: 19
alphanum_fraction: 0.601881   alpha_fraction: 0.532915   hex_fraction: 0
num_lines: 37   avg_line_length: 16.216216   max_line_length: 41
----------------------------------------
repo_name: papercodekl/MolecularGET
__id__: 3,023,657,004,512
blob_id: 8c98e1b74c155cb568c2e51c8cf1c81d33332daf
directory_id: f925caeebc9357c146188655b7b16d20251ee61a
path: /onmt/GCN.py
content_id: fbdf3ea9911c718a6dbdffddb7eabcce919ac11d
detected_licenses: []   license_type: no_license
repo_url: https://github.com/papercodekl/MolecularGET
snapshot_id: 231c086c0e157007a079402c1971ffa5f4e2d7ff
revision_id: 6fff202dc3c44802832c64a01b7f628c3173c741
branch_name: refs/heads/master
visit_date: 2020-07-31T08:21:24.603522
revision_date: 2020-01-01T05:48:56   committer_date: 2020-01-01T05:48:56
github_id: 210,543,189   star_events_count: 8   fork_events_count: 1
gha_* (all 13 GHA fields): null
content:

import dgl
import dgl.function as fn
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time


def gcn_reduce(nodes):
    msgs = torch.cat((nodes.mailbox['h'], nodes.data['h'].unsqueeze(1)), dim=1)
    msgs = torch.mean(msgs, dim=1)
    return {'h': msgs}


def gcn_msg(edges):
    return {'h': edges.src['h']}


class NodeApplyModule(nn.Module):
    def __init__(self, in_feats, out_feats):
        super(NodeApplyModule, self).__init__()
        self.fc = nn.Linear(in_feats, out_feats, bias=True)

    def forward(self, node):
        h = self.fc(node.data['h'])
        h = F.relu(h)
        return {'h': h}


class GCN(nn.Module):
    def __init__(self, in_feats, out_feats):
        super(GCN, self).__init__()
        self.apply_mod = NodeApplyModule(in_feats, out_feats)

    def forward(self, g, features):
        g.ndata['h'] = features
        g.update_all(gcn_msg, gcn_reduce)
        g.apply_nodes(func=self.apply_mod)
        return g.ndata.pop('h')


# 2 layers GCN
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.gcn1 = GCN(256, 256)
        self.gcn2 = GCN(256, 256)
        # self.fc = nn.Linear(70, 15)

    def forward(self, g, features):
        x = self.gcn1(g, features)
        x = self.gcn2(g, x)
        g.ndata['h'] = x
        # hg = dgl.mean_nodes(g, 'h')
        return x

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 1,421   extension: py   num_repo_files: 13   filename: GCN.py   num_lang_files: 11
alphanum_fraction: 0.56228   alpha_fraction: 0.545391   hex_fraction: 0
num_lines: 61   avg_line_length: 22.295082   max_line_length: 81
----------------------------------------
repo_name: dr-dos-ok/Code_Jam_Webscraper
__id__: 19,421,842,123,513
blob_id: 1808fb170b2e6d60975d3bd7dce90519ca0ccafd
directory_id: 15f321878face2af9317363c5f6de1e5ddd9b749
path: /solutions_python/Problem_138/1583.py
content_id: fe73d46d282090410abf40e4fec9bd3fc57c5c59
detected_licenses: []   license_type: no_license
repo_url: https://github.com/dr-dos-ok/Code_Jam_Webscraper
snapshot_id: c06fd59870842664cd79c41eb460a09553e1c80a
revision_id: 26a35bf114a3aa30fc4c677ef069d95f41665cc0
branch_name: refs/heads/master
visit_date: 2020-04-06T08:17:40.938460
revision_date: 2018-10-14T10:12:47   committer_date: 2018-10-14T10:12:47
github_id: null   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

inputfile = 'D-large.in'
outputfile = 'outputfile.txt'

fi = open(inputfile)
fo = open(outputfile, 'w')

test_cases = int(fi.readline())
print test_cases

for case in range(test_cases):
    N = int(fi.readline())
    line = fi.readline()
    line = line.strip('\n')
    naomi = map(float, line.split(' '))
    naomi.sort()
    naomi_1 = map(float, line.split(' '))
    naomi_1.sort()
    line = fi.readline()
    ken = map(float, line.split(' '))
    ken.sort()
    ken_1 = map(float, line.split(' '))
    ken_1.sort()

    naomi_score = 0
    ken_score = 0
    naomi_deceit_score = 0
    ken_deceit_score = 0

    for i in range(N):
        naomi_chosen = naomi[i]
        ken_may = [j for j in ken if j >= naomi_chosen]
        if len(ken_may) > 0:
            ken_chosen = ken_may[0]
        else:
            ken_chosen = ken[0]
        ken.remove(ken_chosen)
        if naomi_chosen > ken_chosen:
            naomi_score = naomi_score + 1
        else:
            ken_score = ken_score + 1

        ken_small = min(ken_1)
        naomi_may_1 = [j for j in naomi_1 if j >= ken_small]
        if len(naomi_may_1) > 0:
            naomi_chosen_1 = naomi_may_1[0]
            naomi_told = max(ken_1) + 0.0000001
        else:
            naomi_chosen_1 = min(naomi_1)
            naomi_told = max(ken_1) - 0.0000001
        naomi_1.remove(naomi_chosen_1)
        ken_may_1 = [j for j in ken_1 if j >= naomi_told]
        if len(ken_may_1) > 0:
            ken_chosen_1 = ken_may_1[-1]
        else:
            ken_chosen_1 = ken_1[0]
        ken_1.remove(ken_chosen_1)
        if naomi_chosen_1 > ken_chosen_1:
            naomi_deceit_score = naomi_deceit_score + 1
        else:
            ken_deceit_score = ken_deceit_score + 1

    fo.write('Case #' + str(case+1) + ': ' + str(naomi_deceit_score) + ' ' + str(naomi_score) + '\n')

fi.close()
fo.close()

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 1,607   extension: py   num_repo_files: 60,747   filename: 1583.py   num_lang_files: 60,742
alphanum_fraction: 0.611699   alpha_fraction: 0.574362   hex_fraction: 0
num_lines: 70   avg_line_length: 21.9   max_line_length: 99
----------------------------------------
repo_name: isaacsorensen/tsunamibayes
__id__: 17,617,955,870,032
blob_id: 4b6e4464b95cfa65b2331ef497af6e2fdfa857e7
directory_id: 69df65414150d7a64061efa670b0dc667bf2e441
path: /Model_Scripts/Scenarios/1852grl/Classes/Custom.py
content_id: af49bab2571b492fac85584c9db2ef449f9c818d
detected_licenses: []   license_type: no_license
repo_url: https://github.com/isaacsorensen/tsunamibayes
snapshot_id: af8c772e87f3bf3f50151492f233e33296be25e8
revision_id: a3a25438bd9b4c874e8db12d752bc04bcf19d115
branch_name: refs/heads/master
visit_date: 2022-12-23T15:50:38.355970
revision_date: 2020-04-10T21:10:30   committer_date: 2020-04-10T21:10:30
github_id: 264,291,140   star_events_count: 0   fork_events_count: 0
gha_license_id: null   gha_fork: true
gha_event_created_at: 2020-05-15T20:22:47   gha_created_at: 2020-05-15T20:22:46
gha_updated_at: 2020-04-10T21:10:37   gha_pushed_at: 2020-05-14T18:09:23
gha_size: 177,994   gha_stargazers_count: 0   gha_forks_count: 0   gha_open_issues_count: 0
gha_language: null   gha_archived: false   gha_disabled: false
content:

"""
Created By Cody Kesler
Created 10/19/2018
Property of BYU Mathematics Dept.
"""
import pandas as pd
from scipy.stats import gaussian_kde
import numpy as np
from scipy import stats

from MCMC import MCMC
from Prior import Prior
from scipy.stats import truncnorm


class Custom(MCMC):
    """
    Use this class to create a custom prior and custom earthquake parameters MCMC draws.
    When the variable for use_custom is set to true, this class will be used as the main
    MCMC class for the Scenario.
    """

    def __init__(self):
        MCMC.__init__(self)
        #self.sample_cols = ['Strike', 'Length', 'Width', 'Depth', 'Slip', 'Rake', 'Dip', 'Longitude', 'Latitude']
        #self.proposal_cols = ['P-Strike', 'P-Length', 'P-Width', 'P-Depth', 'P-Slip', 'P-Rake', 'P-Dip', 'P-Logitude', 'P-Latitude']
        self.sample_cols = ['Strike', 'Length', 'Width', 'Slip', 'Longitude', 'Latitude']
        self.proposal_cols = ['P-Strike', 'P-Length', 'P-Width', 'P-Slip', 'P-Logitude', 'P-Latitude']
        self.observation_cols = ['Mw', 'gauge 1 arrival', 'gauge 1 height', 'gauge 2 arrival', 'gauge 2 height',
                                 'gauge 3 arrival', 'gauge 3 height', 'gauge 4 arrival', 'gauge 4 height',
                                 'gauge 5 arrival', 'gauge 5 height', 'gauge 6 arrival', 'gauge 6 height']

    def get_length(self, mag):
        """
        Length is sampled from a truncated normal distribution that is centered
        at the linear regression of log10(length_meters) and magnitude.
        Linear regression was calculated from wellscoppersmith data.

        Parameters:
        mag (float): the magnitude of the earthquake

        Returns:
        length (float): a sample from the normal distribution centered on the regression
        """
        m1 = 0.6423327398        # slope
        c1 = 0.1357387698        # y intercept
        e1 = 0.4073300731874614  # error bar

        # Calculate bounds on error distribution
        a = mag * m1 + c1 - e1
        b = mag * m1 + c1 + e1
        return 10**truncnorm.rvs(a, b, size=1)[0]  # regression was done on log10(length)

    def get_width(self, mag):
        """
        Width is sampled from a truncated normal distribution that is centered
        at the linear regression of log10(width_meters) and magnitude.
        Linear regression was calculated from wellscoppersmith data.

        Parameters:
        mag (float): the magnitude of the earthquake

        Returns:
        width (float): a sample from the normal distribution centered on the regression
        """
        m2 = 0.4832185193        # slope
        c2 = 0.1179508532        # y intercept
        e2 = 0.4093407095518345  # error bar

        # Calculate bounds on error distribution
        a = mag * m2 + c2 - e2
        b = mag * m2 + c2 + e2
        return 10**truncnorm.rvs(a, b, size=1)[0]  # regression was done on log10(width)

    def get_slip(self, length, width, mag):
        """Calculated from magnitude and rupture area, Ron Harris gave us the equation

        Parameters:
        Length (float): meters
        Width (float): meters
        mag (float): moment magnitude

        Return:
        slip (float): meters
        """
        # Dr. Harris' rigidity constant: 32e11 dynes/cm^2
        mu = 3.2e15  # changing cm^2 to m^2
        slip = 10**(3/2 * (mag + 6.06)) / (mu * length * width)
        return slip

    def acceptance_prob(self, cur_prior_lpdf, prop_prior_lpdf):
        """
        Calculate the acceptance probability given the lpdf for the current and proposed parameters

        :param prop_prior_lpdf: proposed parameters likelihood
        :param cur_prior_lpdf: current parameters likelihood
        :return:
        """
        change_llh = self.change_llh_calc()

        # Log-Likelihood
        change_prior_lpdf = prop_prior_lpdf - cur_prior_lpdf
        print("prop_prior_lpdf is:")
        print(prop_prior_lpdf)
        print("cur_prior_lpdf is:")
        print(cur_prior_lpdf)

        # Note we use np.exp(new - old) because it's the log-likelihood
        return min(1, np.exp(change_llh + change_prior_lpdf))

    def draw(self, prev_draw):
        """
        Draw with the random walk sampling method, using a multivariate_normal
        distribution with the following specified std deviations to
        get the distribution of the step size.

        Returns:
        draws (array): An array of the 9 parameter draws.
        """
        # Std deviations for each parameter, the mean is the current location
        strike_std = 5.        # strike_std = 1.
        length_std = 5.e3      # length_std = 2.e4
        width_std = 2.e3       # width_std = 1.e4
        slip_std = 0.5         # slip_std = 0.5
        longitude_std = 0.15   # longitude_std = .025
        latitude_std = 0.15    # latitude_std = .025
        mean = np.zeros(6)

        # square for std => cov
        cov = np.diag(np.square([strike_std, length_std, width_std, slip_std,
                                 longitude_std, latitude_std]))
        cov *= 0.25

        # random draw from normal distribution
        e = stats.multivariate_normal(mean, cov).rvs()

        # does sample update normally
        vals = prev_draw.values + e
        new_draw = pd.DataFrame(columns=self.sample_cols)
        new_draw.loc[0] = vals
        print("Random walk difference:", e)
        print("New draw:", new_draw)

        # return new draw (pandas series)
        return new_draw.loc[0]

        # stub for magnitude sampling
        #strike_std = 5.
        #longitude_std = 0.15
        #latitude_std = 0.15
        #magnitude_std = 0.1  # garret arbitrarily chose this
        ## square for std => cov
        #cov = np.diag(np.square([strike_std, longitude_std, latitude_std, magnitude_std]))
        #mean = np.zeros(4)
        #cov *= 0.25
        ## random draw from normal distribution
        #e = stats.multivariate_normal(mean, cov).rvs()
        ## does sample update normally
        #print("Random walk difference:", e)
        #print("New draw:", prev_draw.values + e)
        ## prev_draw should be a pandas but we will change to arrays until we get it all worked out
        #temp = prev_draw.values + e
        #length = self.get_length(temp['Magnitude'])  # these are floats so the hstack below will break
        #width = self.get_width(temp['Magnitude'])
        #return np.hstack((temp, length, width))

    def build_priors(self):
        """
        Builds the priors
        :return:
        """
        samplingMult = 50
        bandwidthScalar = 2.0

        # build longitude, latitude and strike prior
        data = pd.read_excel('./InputData/Fixed92kmFaultOffset50kmgapPts.xlsx')
        data = np.array(data[['POINT_X', 'POINT_Y', 'Strike']])
        distrb0 = gaussian_kde(data.T)

        # build dip, rake, depth, length, width, and slip prior
        vals = np.load('./InputData/6_param_bootstrapped_data.npy')
        vals_1852 = vals[:, 3:]
        vals_1852 = np.log(vals_1852)
        distrb1 = gaussian_kde(vals_1852.T)
        distrb1.set_bandwidth(bw_method=distrb1.factor * bandwidthScalar)

        dists = [distrb0, distrb1]

        # DEPRECATED?
        # dists = {}
        # dists[distrb0] = ['Longitude', 'Latitude', 'Strike']
        # dists[distrb1] = ['Dip', 'Rake', 'Depth', 'Length', 'Width', 'Slip']

        return Prior(dists)

    def map_to_okada(self, draws):
        """
        TODO: JARED AND JUSTIN map to okada space
        :param draws:
        :return: okada_params
        """
        # GRL-style 6-parameter sampling
        lon = draws["Longitude"]
        lat = draws["Latitude"]
        strike = draws["Strike"]
        length = draws["Length"]
        width = draws["Width"]
        slip = draws["Slip"]

        ## stub for magnitude sampling
        ##mw = draw["Magnitude"]
        #mw = 8.0  # PLACEHOLDER
        #length = self.get_length(mw)
        #width = self.get_width(mw)
        #slip = self.get_slip(length, width, mw)

        # deterministic okada parameters
        rake = 90
        dip = 13
        depth = self.doctored_depth_1852_adhoc(lon, lat, dip)

        #vals = np.array([strike, length, width, depth, slip, rake, dip, lon, lat])
        #okada_params = pd.DataFrame(columns=self.sample_cols)
        #okada_params.loc[0] = vals
        okada_params = np.array([strike, length, width, depth, slip, rake, dip, lon, lat])
        return okada_params

    def make_observations(self, params, arrivals, heights):
        """
        Computes the observations to save to the observations file based off the
        earthquake parameters and the wave heights and arrival times at each gauge.
        The default setting is to save the earthquake magnitude, and the arrival
        times and wave heights at each gauge.

        :param params: numpy array of Okada parameters
        :param arrivals: list of arrival times for the specified gauges
        :param heights: list of Geoclaw produced wave heights at each gauge
        :return: a list that provides the observations in the correct ordering
        """
        obvs = []
        #obvs[0] = self.compute_mw(params[1], params[2], params[4])  # first the magnitude
        obvs.append(self.compute_mw(params["Length"], params["Width"], params["Slip"]))  # first the magnitude
        for ii in range(len(arrivals)):
            # alternate arrival times with wave heights
            obvs.append(arrivals[ii])
            obvs.append(heights[ii])
        return obvs

    def compute_mw(self, L, W, slip, mu=30.e9):
        """
        Computes the Magnitude for a set of proposal parameters for saving
        :param L: float: Length of Earthquake
        :param W: float: Width of Earthquake
        :param slip: float: Slip of Earthquake
        :param mu:
        :return: Magnitude of Earthquake
        """
        unitConv = 1e7  # convert from Nm to 1e-7 Nm
        Mw = (2 / 3) * np.log10(L * W * slip * mu * unitConv) - 10.7
        return Mw

    def haversine_distance(self, p1, p2):
        """
        This function is set up separately because the haversine distance
        likely will still be useful after we're done with this adhoc approach.

        Note, this does not account for the oblateness of the Earth. Not sure
        if this will cause a problem.
        """
        r = 6371000

        # Setting up haversine terms of distance expansion
        hav_1 = np.power(np.sin((p2[1] - p1[1]) / 2 * np.pi / 180), 2.0)
        hav_2 = np.cos(p2[1] * np.pi / 180) * np.cos(p1[1] * np.pi / 180) * np.power(
            np.sin((p2[0] - p1[0]) / 2 * np.pi / 180), 2.0)

        # taking the arcsine of the root of the sum of the haversine terms
        root = np.sqrt(hav_1 + hav_2)
        arc = np.arcsin(root)

        # return final distance between the two points
        return 2 * r * arc

    def doctored_depth_1852_adhoc(self, longitude, latitude, dip):
        """
        This is a function written specifically for our 1852 depth fix.
        We make use of the fault points used in generating our prior as a
        jumping-off point for fixing the depth of an event. We use a simple
        trig correction based on a 20 degree dip angle and the haversine
        distance to get the depth of the earthquake in question.

        Note, this will do the dip correction regardless of which side of the
        trench our sample is on. Recognizing when the sample is on the wrong
        side seems nontrivial, so we have not implemented a check for this here.
        """
        ## set up sample point and fault array
        #p1 = np.array([longitude, latitude])
        #fault_file = './InputData/fault_array.npy'
        #fault_array = np.load(fault_file)
        ## will store haversine distances for comparison
        #dist_array = np.zeros(len(fault_array)//2)
        #for i in range(len(dist_array)):
        #    x = fault_array[2 * i]
        #    y = fault_array[2 * i + 1]
        #    p2 = np.array([x, y])
        #    dist_array[i] = self.haversine_distance(p1, p2)
        #dist = np.amin(dist_array)
        ## need to add trig correction
        #return (20000 + dist * np.tan(20 * np.pi / 180))

        # set up sample point and fault array
        p1 = np.array([longitude, latitude])
        fault_file = './InputData/fault_array.npy'
        fault_array = np.load(fault_file)

        # will store haversine distances for comparison
        dist_array = np.zeros(len(fault_array)//2)
        for i in range(len(dist_array)):
            x = fault_array[2*i]
            y = fault_array[2*i + 1]
            p2 = np.array([x, y])
            dist_array[i] = self.haversine_distance(p1, p2)

        dist = np.amin(dist_array)
        distind = np.argmin(dist_array)
        arcpt = np.array([fault_array[2*distind], fault_array[2*distind + 1]])

        #arcmidpt = np.array([129., -6.])
        #dist_array = np.zeros(len(fault_array)//2)
        #for i in range(len(dist_array)):
        #    x = fault_array[2*i]
        #    y = fault_array[2*i + 1]
        #    p2 = np.array([x, y])
        #    dist_array[i] = self.haversine_distance(arcmidpt, p2)
        #midptdist = np.amin(dist_array)

        # midpoint of prior arc
        arcmidpt = np.array([129., -6.])

        # distance between midpoint and point
        distpt = self.haversine_distance(p1, arcmidpt)

        # distance between midpoint and arc
        distarc = self.haversine_distance(arcpt, arcmidpt)

        slope = 1. if distpt < distarc else -1.

        print("distpt is:")
        print(distpt)
        print("distarc is:")
        print(distarc)

        # need to add trig correction
        return (21316. + slope*dist*np.tan(dip*np.pi/180))

    def init_guesses(self, init):
        """
        Initialize the sample parameters
        :param init: String: (manual, random or restart)
        :return:
        """
        guesses = None
        if init == "manual":
            # initial guesses taken from sample 49148 of 20190320_chains/007
            #strike = 1.90000013e+02
            #length = 1.33325981e+05
            #width = 8.45009646e+04
            #depth = 5.43529311e+04
            #slip = 2.18309283e+01
            #rake = 9.00000000e+01
            #dip = 1.30000000e+01
            #long = 1.30850829e+02
            #lat = -5.45571375e+00
            #guesses = np.array([strike, length, width, depth, slip, rake, dip,
            #                    long, lat])

            strike = 1.90000013e+02
            length = 1.33325981e+05
            width = 8.45009646e+04
            slip = 2.18309283e+01
            lon = 1.30850829e+02
            lat = -5.45571375e+00
            #guesses = np.array([strike, length, width, slip, long, lat])
            vals = np.array([strike, length, width, slip, lon, lat])
            guesses = pd.DataFrame(columns=self.sample_cols)
            guesses.loc[0] = vals

        elif init == "random":
            prior = self.build_priors()
            guesses = prior.rvs(1)
            #guesses = pd.DataFrame(columns=self.sample_cols)
            #guesses.loc[0] = vals
            print("initial sample is:")
            print(guesses)
            #raise Exception('random initialization not currently tested')

        elif init == "restart":
            #guesses = np.load('../samples.npy')[0][:9]
            ## np.save("guesses.npy", self.guesses)
            #print("initial sample is:")
            #print(guesses)
            raise Exception('restart initialization not currently implemented')

        print("initial sample:")
        print(guesses)
        return guesses

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 15,557   extension: py   num_repo_files: 67   filename: Custom.py   num_lang_files: 42
alphanum_fraction: 0.587453   alpha_fraction: 0.551777   hex_fraction: 0
num_lines: 406   avg_line_length: 37.317734   max_line_length: 260
----------------------------------------
repo_name: kingsdigitallab/gitan
__id__: 7,430,293,464,061
blob_id: d8cad94ea3c1ad9227ffd7e40d7defcf042b26c3
directory_id: 3d64cf48a9210a1848d9633c072b022efe43fd8f
path: /settings.py
content_id: fff9c8a4aa459e6d89693bed07bc77d670cb277d
detected_licenses: [ "MIT" ]   license_type: permissive
repo_url: https://github.com/kingsdigitallab/gitan
snapshot_id: 502b0d93a87f273469d8ff8a3db47c3638a658e4
revision_id: 1c23ea16bb6373f4bbd92b4d7dc0b2a697069b23
branch_name: refs/heads/master
visit_date: 2021-06-11T22:38:32.795895
revision_date: 2019-03-27T21:11:49   committer_date: 2019-03-27T21:11:49
github_id: 178,069,433   star_events_count: 0   fork_events_count: 0
gha_license_id: MIT   gha_fork: false
gha_event_created_at: 2021-06-01T23:39:55   gha_created_at: 2019-03-27T20:25:04
gha_updated_at: 2020-04-03T11:37:53   gha_pushed_at: 2021-06-01T23:39:52
gha_size: 176   gha_stargazers_count: 0   gha_forks_count: 0   gha_open_issues_count: 1
gha_language: Python   gha_archived: false   gha_disabled: false
content:

# imported from local_settings
# DO NOT place secret keys in this file

DATA_PATH = 'data'

GIT_MODEL_QUERIES = [
    'org:kcl-ddh filename:models.py',
    'org:kingsdigitallab filename:models.py',
]

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 198   extension: py   num_repo_files: 5   filename: settings.py   num_lang_files: 3
alphanum_fraction: 0.712121   alpha_fraction: 0.712121   hex_fraction: 0
num_lines: 7   avg_line_length: 27.285714   max_line_length: 45
----------------------------------------
repo_name: mariomitte/rpilinijapogona
__id__: 481,036,361,899
blob_id: 612a5f1518c8ba121e62929b244d4ba61253e029
directory_id: 823df2d441e1088ca58f6595899e69948942cca5
path: /pogon1/api/urls.py
content_id: 59563f1b5c47796e3aeb1fb6e94c0115f8c54a1b
detected_licenses: []   license_type: no_license
repo_url: https://github.com/mariomitte/rpilinijapogona
snapshot_id: aa171b45bb371d98f972db191894a1ab7eae984d
revision_id: 926a73b921e1c60e30f9dafaeff3524a366283bd
branch_name: refs/heads/master
visit_date: 2023-02-27T20:23:29.300413
revision_date: 2021-01-29T02:59:55   committer_date: 2021-01-29T02:59:55
github_id: 111,656,336   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter

from pogon1.api.views import UpravljanjeViewSet, CvorViewSet

# create the URL views for the API
router = DefaultRouter()
router.register(prefix='upravljanje', viewset=UpravljanjeViewSet)

urlpatterns = [
    url(r'^cvor/$', CvorViewSet.as_view()),
]

urlpatterns += router.urls

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 367   extension: py   num_repo_files: 41   filename: urls.py   num_lang_files: 18
alphanum_fraction: 0.779292   alpha_fraction: 0.776567   hex_fraction: 0
num_lines: 13   avg_line_length: 27.230769   max_line_length: 65
----------------------------------------
repo_name: aleadra/Spell-Check-Project
__id__: 7,902,739,833,849
blob_id: 86350e050bdb627297a6e80db9b8b54276698690
directory_id: 1ada788bf548000469c99687eba4e287af00d1a0
path: /src/test_utils.py
content_id: 9ba07fd0cce3327dc66e199b1a8b0c993bda918f
detected_licenses: []   license_type: no_license
repo_url: https://github.com/aleadra/Spell-Check-Project
snapshot_id: 26781abee84c40fb40935b3883baa8e47b04ba08
revision_id: 023916820509c9147eb293f294f261a6fbcaa2c2
branch_name: refs/heads/master
visit_date: 2023-07-08T06:33:27.946464
revision_date: 2012-11-28T13:45:31   committer_date: 2012-11-28T13:45:31
github_id: null   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

#!/usr/bin/python

import utils
import unittest
import lexicon
from suggestion import Suggestion


class UtilsTest(unittest.TestCase):
    def setUp(self):
        # self.spell_checker = utils.SpellChecker()
        self.lexicon = lexicon.Lexicon(word_list = [
            'yo', 'boyz', 'foo', 'giant', 'cell', 'M', 'forward', 'kick',
            'football', 'hall', 'of', 'fame'])

    def tearDown(self):
        pass

    def test_get_EP(self):
        test_dict = {1: [('foo', 0.3), ('bar', 0.7)],
                     2: [('baz', 0.8), ('bah', 0.2)]}
        ans_dict = {1: ['bar', 'jack'],
                    2: ['all', 'work', 'baz']}
        self.assertAlmostEqual(utils.get_EP([1, 2], test_dict, ans_dict), 0.75)
        self.assertAlmostEqual(utils.get_EP([1], test_dict, ans_dict), 0.7)

    def test_get_EP_corner_cases(self):
        test_dict = {1: [('foo', 0.3), ('bar', 0.7)],
                     2: [('baz', 0.8), ('bah', 0.2)]}
        # key's ans_dict value is []
        ans_dict = {1: [], 2: []}
        self.assertAlmostEqual(utils.get_EP([1, 2], test_dict, ans_dict), 0)
        # query_list is empty
        self.assertAlmostEqual(utils.get_EP([], test_dict, ans_dict), 0)

    def test_get_ER(self):
        test_dict = {1: [('foo', 0.3), ('bar', 0.7)],
                     2: [('baz', 0.8), ('bah', 0.2)]}
        ans_dict = {1: ['bar', 'jack'],
                    2: ['all', 'work', 'baz']}
        self.assertAlmostEqual(utils.get_ER([1, 2], test_dict, ans_dict), 5.0/12)
        self.assertAlmostEqual(utils.get_ER([1], test_dict, ans_dict), 1.0/2)
        self.assertAlmostEqual(utils.get_ER([2], test_dict, ans_dict), 1.0/3)

    def test_get_ER(self):
        test_dict = {1: [('foo', 0.3), ('bar', 0.7)],
                     2: [('baz', 0.8), ('bah', 0.2)]}
        ans_dict = {1: [], 2: ['all', 'work', 'baz']}
        # key's ans_dict value is []
        self.assertAlmostEqual(utils.get_ER([1, 2], test_dict, ans_dict), 1.0/6)
        # query_list is empty
        self.assertAlmostEqual(utils.get_ER([], test_dict, ans_dict), 0)

    def test_get_HM(self):
        self.assertAlmostEqual(utils.get_HM(3, 3), 3, 1)
        self.assertAlmostEqual(utils.get_HM(3, 0), 0, 1)
        self.assertAlmostEqual(utils.get_HM(0.3, 0.7), 0.42, 1)
        self.assertAlmostEqual(utils.get_HM(0.5, 0.5), 0.5, 1)
        self.assertAlmostEqual(utils.get_HM(0.1, 0.9), 0.18, 1)

    def test_get_HM_corner_cases(self):
        self.assertAlmostEqual(utils.get_HM(0.0, 3.0), 0.0, 1)
        self.assertAlmostEqual(utils.get_HM(0.3, 0.0), 0.0, 1)
        self.assertAlmostEqual(utils.get_HM(0.0, 0.0), 0.0, 1)

    def test_partition(self):
        given_list = [1, 2, 3, 4, 5, 6, 7, 8]
        indices1 = [1]
        indices2 = [1, 5]
        indices3 = [1, 3, 5]
        ans1 = [[1], [2, 3, 4, 5, 6, 7, 8]]
        ans2 = [[1], [2, 3, 4, 5], [6, 7, 8]]
        ans3 = [[1], [2, 3], [4, 5], [6, 7, 8]]
        self.assertEqual(utils.partition(given_list, indices1), ans1)
        self.assertEqual(utils.partition(given_list, indices2), ans2)
        self.assertEqual(utils.partition(given_list, indices3), ans3)

    def test_is_sorted(self):
        sorted_list = [2, 3, 6, 7]
        unsorted_list = [3, 2, 6, 7]
        self.assertTrue(utils.is_sorted(sorted_list))
        self.assertFalse(utils.is_sorted(unsorted_list))

    def test_get_splits(self):
        run_on_word = 'giantkick'
        ans_1_splits = [['giant', 'kick'],]
        ans_2_splits = []
        self.assertEqual(utils.get_splits(run_on_word, 1, self.lexicon), ans_1_splits)
        self.assertEqual(utils.get_splits(run_on_word, 2, self.lexicon), ans_2_splits)

    def test_get_corrected_run_on_queries(self):
        query_3_words = Suggestion(['footballhalloffame', 'rocks'])
        ans_3_words = [Suggestion(['football', 'hall', 'of', 'fame', 'rocks'])]
        query_2_words = Suggestion(['giantcell', 'M'])
        ans_2_words = [Suggestion(['giant', 'cell', 'M'])]
        self.assertEqual(utils.get_corrected_run_on_queries(query_2_words, self.lexicon), ans_2_words)
        self.assertEqual(utils.get_corrected_run_on_queries(query_3_words, self.lexicon), ans_3_words)

    def test_get_corrected_split_queries(self):
        # No splits
        query_1_word = ['fast']
        ans_1_word = []
        # one split, total two words
        query_2_word = ['forw', 'ard']
        ans_2_word = [['forward']]
        # one split, total three words
        query_3_word = ['forw', 'ard', 'march']
        ans_3_word = [['forward', 'march']]
        # one split, total four words
        query_4_word = ['fast', 'forw', 'ard', 'march']
        ans_4_word = [['fast', 'forward', 'march']]
        queries = [query_1_word, query_2_word, query_3_word, query_4_word]
        queries = [Suggestion(query) for query in queries]
        answers = [ans_1_word, ans_2_word, ans_3_word, ans_4_word]
        for i in xrange(4):
            self.assertEqual(utils.get_corrected_split_queries(queries[i], self.lexicon), answers[i])

    def test_get_normalized_probabilities(self):
        probability_list = [0.2, 0.3, 0.2]
        ans = [0.28571428571428575, 0.4285714285714286, 0.28571428571428575]
        actual_list = utils.get_normalized_probabilities(probability_list)
        for i in xrange(len(actual_list)):
            self.assertAlmostEqual(actual_list[i], ans[i])
        self.assertAlmostEqual(sum(actual_list), 1.0)


def get_suite():
    suite = unittest.TestLoader().loadTestsFromTestCase(UtilsTest)
    return suite


if __name__ == '__main__':
    suite = get_suite()
    unittest.TextTestRunner(verbosity=2).run(suite)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 6,470   extension: py   num_repo_files: 28   filename: test_utils.py   num_lang_files: 17
alphanum_fraction: 0.487326   alpha_fraction: 0.443586   hex_fraction: 0
num_lines: 168   avg_line_length: 37.511905   max_line_length: 89
----------------------------------------
repo_name: viswanathgs/pantheon
__id__: 9,053,791,094,977
blob_id: b824ed4ad18ce59fb8a99591fb94c13c20c47c3b
directory_id: 7e7190150e7eaba1d56a4c1ea9c0caa0fc6b55d7
path: /src/analysis/analyze.py
content_id: 8269eef02254fec2274499fa14a564ed69a6f38d
detected_licenses: []   license_type: no_license
repo_url: https://github.com/viswanathgs/pantheon
snapshot_id: eea6656fadf8dba9b912af39a4e5e418466c6fb6
revision_id: 5937edc3c99309d8a84d545c33d08f5e45bbbfb5
branch_name: refs/heads/master
visit_date: 2020-07-29T10:16:41.136091
revision_date: 2020-06-09T13:56:45   committer_date: 2020-06-09T13:56:45
github_id: 209,759,714   star_events_count: 1   fork_events_count: 1
gha_license_id: null   gha_fork: true
gha_event_created_at: 2020-06-09T13:56:47   gha_created_at: 2019-09-20T09:56:23
gha_updated_at: 2020-05-12T16:42:44   gha_pushed_at: 2020-06-09T13:56:46
gha_size: 2,920   gha_stargazers_count: 0   gha_forks_count: 1   gha_open_issues_count: 0
gha_language: Python   gha_archived: false   gha_disabled: false
content:

#!/usr/bin/env python

from os import path

import arg_parser
import context
from helpers.subprocess_wrappers import check_call


def main():
    args = arg_parser.parse_analyze()

    analysis_dir = path.join(context.src_dir, 'analysis')
    plot = path.join(analysis_dir, 'plot.py')
    report = path.join(analysis_dir, 'report.py')

    plot_cmd = ['python2', plot]
    report_cmd = ['python2', report]

    for cmd in [plot_cmd, report_cmd]:
        if args.data_dir:
            cmd += ['--data-dir', args.data_dir]
        if args.schemes:
            cmd += ['--schemes', args.schemes]
        if args.include_acklink:
            cmd += ['--include-acklink']

    check_call(plot_cmd)
    check_call(report_cmd)


if __name__ == '__main__':
    main()

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 760   extension: py   num_repo_files: 49   filename: analyze.py   num_lang_files: 43
alphanum_fraction: 0.597368   alpha_fraction: 0.594737   hex_fraction: 0
num_lines: 33   avg_line_length: 22.030303   max_line_length: 57
----------------------------------------
repo_name: lx10077/fedavgpy
__id__: 18,519,898,988,103
blob_id: 32bbfa03548aacd23198eb2af85de2ac8cd0fb8d
directory_id: c0852384a61eaade525ba60c0212dc81edffad34
path: /main.py
content_id: 82bcba4043535db1cc586eba9fc546b37dfa4180
detected_licenses: [ "MIT" ]   license_type: permissive
repo_url: https://github.com/lx10077/fedavgpy
snapshot_id: 2993dcc035ad96923a08805f954394acdbac5ae1
revision_id: 400160899c4ca728da519d405e05e5210ba53d91
branch_name: refs/heads/master
visit_date: 2022-12-11T16:02:05.961301
revision_date: 2022-12-07T01:14:28   committer_date: 2022-12-07T01:14:28
github_id: 223,700,059   star_events_count: 233   fork_events_count: 68
gha_license_id: MIT   gha_fork: false
gha_event_created_at: 2022-12-06T13:34:14   gha_created_at: 2019-11-24T06:03:23
gha_updated_at: 2022-12-01T20:52:01   gha_pushed_at: 2022-12-06T13:34:13
gha_size: 864   gha_stargazers_count: 164   gha_forks_count: 58   gha_open_issues_count: 3
gha_language: Python   gha_archived: false   gha_disabled: false
content:

import numpy as np
import argparse
import importlib
import torch
import os

from src.utils.worker_utils import read_data
from config import OPTIMIZERS, DATASETS, MODEL_PARAMS, TRAINERS


def read_options():
    parser = argparse.ArgumentParser()
    parser.add_argument('--algo', help='name of trainer;', type=str, choices=OPTIMIZERS, default='fedavg4')
    parser.add_argument('--dataset', help='name of dataset;', type=str, default='mnist_all_data_0_equal_niid')
    parser.add_argument('--model', help='name of model;', type=str, default='logistic')
    parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
    parser.add_argument('--gpu', action='store_true', default=False, help='use gpu (default: False)')
    parser.add_argument('--noprint', action='store_true', default=False, help='whether to print inner result (default: False)')
    parser.add_argument('--noaverage', action='store_true', default=False, help='whether to only average local solutions (default: True)')
    parser.add_argument('--device', help='selected CUDA device', default=0, type=int)
    parser.add_argument('--num_round', help='number of rounds to simulate;', type=int, default=200)
    parser.add_argument('--eval_every', help='evaluate every ____ rounds;', type=int, default=5)
    parser.add_argument('--clients_per_round', help='number of clients trained per round;', type=int, default=10)
    parser.add_argument('--batch_size', help='batch size when clients train on data;', type=int, default=64)
    parser.add_argument('--num_epoch', help='number of epochs when clients train on data;', type=int, default=5)
    parser.add_argument('--lr', help='learning rate for inner solver;', type=float, default=0.1)
    parser.add_argument('--seed', help='seed for randomness;', type=int, default=0)
    parser.add_argument('--dis', help='add more information;', type=str, default='')
    parsed = parser.parse_args()
    options = parsed.__dict__
    options['gpu'] = options['gpu'] and torch.cuda.is_available()

    # Set seeds
    np.random.seed(1 + options['seed'])
    torch.manual_seed(12 + options['seed'])
    if options['gpu']:
        torch.cuda.manual_seed_all(123 + options['seed'])

    # read data
    idx = options['dataset'].find("_")
    if idx != -1:
        dataset_name, sub_data = options['dataset'][:idx], options['dataset'][idx+1:]
    else:
        dataset_name, sub_data = options['dataset'], None
    assert dataset_name in DATASETS, "{} not in dataset {}!".format(dataset_name, DATASETS)

    # Add model arguments
    options.update(MODEL_PARAMS(dataset_name, options['model']))

    # Load selected trainer
    trainer_path = 'src.trainers.%s' % options['algo']
    mod = importlib.import_module(trainer_path)
    trainer_class = getattr(mod, TRAINERS[options['algo']])

    # Print arguments and return
    max_length = max([len(key) for key in options.keys()])
    fmt_string = '\t%' + str(max_length) + 's : %s'
    print('>>> Arguments:')
    for keyPair in sorted(options.items()):
        print(fmt_string % keyPair)

    return options, trainer_class, dataset_name, sub_data


def main():
    # Parse command line arguments
    options, trainer_class, dataset_name, sub_data = read_options()

    train_path = os.path.join('./data', dataset_name, 'data', 'train')
    test_path = os.path.join('./data', dataset_name, 'data', 'test')

    # `dataset` is a tuple like (cids, groups, train_data, test_data)
    all_data_info = read_data(train_path, test_path, sub_data)

    # Call appropriate trainer
    trainer = trainer_class(options, all_data_info)
    trainer.train()


if __name__ == '__main__':
    main()

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 4,838   extension: py   num_repo_files: 21   filename: main.py   num_lang_files: 19
alphanum_fraction: 0.506408   alpha_fraction: 0.500827   hex_fraction: 0
num_lines: 131   avg_line_length: 35.931298   max_line_length: 91
----------------------------------------
repo_name: moidshaikh/hackerranksols
__id__: 17,952,963,321,866
blob_id: 61afac15cba6461b0ffb3e2a58ad74af17b1d211
directory_id: 53a9e3b7f9e7dba6404c354d1e35496f198bb9dd
path: /hackerrank/designer_doormat.py
content_id: e2718bbf3c745ab8db1875af37dc4a06fb89e057
detected_licenses: []   license_type: no_license
repo_url: https://github.com/moidshaikh/hackerranksols
snapshot_id: 25eafcb16721bd5ac9f9cd2d085c24ba3a1f67f5
revision_id: 0350d0968b0639bcae53d8e3f3e53d36e191d96a
branch_name: refs/heads/master
visit_date: 2023-01-23T22:21:48.077394
revision_date: 2023-01-16T20:49:15   committer_date: 2023-01-16T20:49:15
github_id: 74,232,123   star_events_count: 2   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

# Enter your code here. Read input from STDIN. Print output to STDOUT
n, m = map(int, input().split())

# 3, 9, 15, 21, 27, 33
# 1, 3, 05, 07, 09, 11
li = list(range(1, n, 2)) + [n] + list(range(n-2, -1, -2))

for i in li:
    if i == n:
        print("WELCOME".center(m, '-'))
    else:
        print((".|."*i).center(m, '-'))

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 320   extension: py   num_repo_files: 140   filename: designer_doormat.py   num_lang_files: 130
alphanum_fraction: 0.525   alpha_fraction: 0.446875   hex_fraction: 0
num_lines: 13   avg_line_length: 23.692308   max_line_length: 69
----------------------------------------
repo_name: ShenTonyM/LeetCode-Learn
__id__: 1,726,576,873,986
blob_id: e42e7ad6ddd5eb9e5c25c1a7a54a4b16a00161e4
directory_id: 85dd85a765e845f3c0ecd8d5a0b4cfe4685e44c5
path: /Q617MergeTwoBinaryTrees.py
content_id: 9e4328a2d74d4b7747d892fde8e30c8ab92ef67c
detected_licenses: []   license_type: no_license
repo_url: https://github.com/ShenTonyM/LeetCode-Learn
snapshot_id: 674d4249369f0b534d5245bca11fa78218efe7e9
revision_id: 37ece0a8e92a41ced2b4ce0f2d8dda3826b915ae
branch_name: refs/heads/master
visit_date: 2020-03-31T06:58:16.474518
revision_date: 2019-07-30T05:22:23   committer_date: 2019-07-30T05:22:23
github_id: 152,001,874   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

from collections import deque


# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def mergeTreesRecur(self, t1, t2, father, left_right_flag):
        if t1 and t2:
            t1.val += t2.val
            self.mergeTreesRecur(t1.left, t2.left, t1, "left")
            self.mergeTreesRecur(t1.right, t2.right, t1, "right")
            return
        elif t2:
            if left_right_flag == "left":
                father.left = t2
            elif left_right_flag == "right":
                father.right = t2
            return
        else:
            return

    def mergeTrees(self, t1, t2):
        """
        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        if not t1 and not t2:
            return None
        elif not t2:
            return t1
        elif not t1:
            return t2
        else:
            t1.val += t2.val
            self.mergeTreesRecur(t1.left, t2.left, t1, "left")
            self.mergeTreesRecur(t1.right, t2.right, t1, "right")
            return t1

    def level_order_traverse(self, root):
        """
        :type root: TreeNode
        :rtype: None
        """
        tree_node_list = deque()
        tree_node_list.append(root)
        while tree_node_list:
            root = tree_node_list.popleft()
            if root.left:
                tree_node_list.append(root.left)
            if root.right:
                tree_node_list.append(root.right)
            print(root.val)
        return

    def buildTree(self, tree_data):
        """
        :type tree_data: list
        :rtype: TreeNode
        """
        tree_node_list = []
        for i in range(len(tree_data)):
            if tree_data[i] != 'null':
                tree_node_list.append(TreeNode(tree_data[i]))
            else:
                tree_node_list.append(None)
        for i in range(len(tree_data)):
            if tree_node_list[i]:
                if 2 * i + 1 < len(tree_data):
                    tree_node_list[i].left = tree_node_list[2 * i + 1]
                if 2 * i + 2 < len(tree_data):
                    tree_node_list[i].right = tree_node_list[2 * i + 2]
                else:
                    tree_node_list[i].left = None
                    tree_node_list[i].right = None
        return tree_node_list[0]


if __name__ == "__main__":
    t1 = Solution().buildTree([1, 3, 2, 5])
    # Solution().level_order_traverse(t1)
    t2 = Solution().buildTree([2, 1, 3, "null", 4, "null", 7])
    t_merge = Solution().mergeTrees(t1, t2)
    Solution().level_order_traverse(t_merge)

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 2,714   extension: py   num_repo_files: 156   filename: Q617MergeTwoBinaryTrees.py   num_lang_files: 155
alphanum_fraction: 0.491157   alpha_fraction: 0.470155   hex_fraction: 0
num_lines: 93   avg_line_length: 28.193548   max_line_length: 75
----------------------------------------
repo_name: davidandym/goodtoknow
__id__: 13,958,643,716,942
blob_id: b583761086797825d580d4ac89cf5064a3eb8d9f
directory_id: ae9f03e5adff9bb1e96ff01163a57d92f8bde0f9
path: /ctc/m/contiguous_seq.py
content_id: b21d17270f53d781839b4a9de2fcb77b2eed8526
detected_licenses: []   license_type: no_license
repo_url: https://github.com/davidandym/goodtoknow
snapshot_id: f3d46d7f9ffd10f5bdf49364ee6d74b96dce9225
revision_id: 1dabb9f6a3a9dbdb90716c8d9b8ee40c669e6cb6
branch_name: refs/heads/master
visit_date: 2017-12-22T09:43:35.387065
revision_date: 2017-12-19T01:46:46   committer_date: 2017-12-19T01:46:46
github_id: 27,935,865   star_events_count: 0   fork_events_count: 0
gha_* (all 13 GHA fields): null
content:

# find contiguous seq with largest sum


def contiguous_seq(arr):
    if len(arr) <= 1:
        return arr[0], arr

    max_sum = arr[0]
    cur_sum = arr[0]
    seq_beg = 0
    seq_end = 0
    max_seq_beg = 0
    max_seq_end = 0

    for i in xrange(0, len(arr)):
        # special cases so we do the right thing for neg
        if i > 0:
            cur_sum += arr[i]
        if cur_sum > max_sum:
            max_sum = cur_sum
            max_seq_beg = seq_beg
            max_seq_end = seq_end
        if cur_sum < 0:
            cur_sum = 0
            seq_beg = i + 1
            seq_end = i + 1
        else:
            seq_end += 1

    return max_sum, arr[max_seq_beg: max_seq_end+1]


if __name__ == "__main__":
    arr = [-8, 3, -2, 4, -10]
    print contiguous_seq(arr), 'should be (5, [3, -2, 4])'
    arr = [-8, 3, -2, 4, -10, 1000, -5]
    print contiguous_seq(arr), 'should be (1000, [1000])'
    arr = [-8, 3, -2, 4, 1000, -2000]
    print contiguous_seq(arr), 'should be (1005, [3, -2, 4, 1000])'
    arr = [-2, -5, -3]
    print contiguous_seq(arr), 'should be (-2, [-2])'
    arr = [-3]
    print contiguous_seq(arr), 'should be (-3, [-3])'

src_encoding: UTF-8   language: Python   is_vendor: false   is_generated: false
length_bytes: 1,158   extension: py   num_repo_files: 20   filename: contiguous_seq.py   num_lang_files: 18
alphanum_fraction: 0.482729   alpha_fraction: 0.417098   hex_fraction: 0
num_lines: 39   avg_line_length: 28.717949   max_line_length: 67
ThanksgivingChang/dangdang
16,406,775,111,275
daf445106009bce1d059684464ea86e2a6abaf9a
270ddeb51d526feab3fedc2034c4e7ed8a714edd
/indent/migrations/0001_initial.py
817cb303090f018615a0aa3877ec6316ab96c567
[]
no_license
https://github.com/ThanksgivingChang/dangdang
aeccb3d40887cfce795aff3157aa89dce842631f
af8154a7bd4e9808f6f6b8d3c8624ef2eaad583b
refs/heads/master
2020-04-14T11:22:43.853969
2019-01-02T08:04:11
2019-01-02T08:04:11
163,807,950
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 2.0.2 on 2018-09-10 16:37 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('login', '0001_initial'), ] operations = [ migrations.CreateModel( name='Address', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('consignee', models.CharField(max_length=50)), ('detailAddress', models.CharField(max_length=200)), ('postalcode', models.CharField(max_length=10)), ('telephone', models.CharField(max_length=12, null=True)), ('mobilephone', models.CharField(max_length=15, null=True)), ('userid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User')), ], options={ 'db_table': 'd_address', }, ), migrations.CreateModel( name='OrderItems', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('productName', models.CharField(max_length=100)), ('price', models.FloatField()), ('amount', models.IntegerField()), ('subtotal', models.FloatField()), ], options={ 'db_table': 'd_orderitems', }, ), migrations.CreateModel( name='Orders', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('orderNumber', models.CharField(max_length=30)), ('dateInProduct', models.DateTimeField()), ('status', models.CharField(max_length=10)), ('freight', models.FloatField()), ('expenditure', models.FloatField(verbose_name=0)), ('address', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='indent.Address')), ('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User')), ], options={ 'db_table': 'd_orders', }, ), migrations.AddField( model_name='orderitems', name='orderid', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='indent.Orders'), ), ]
UTF-8
Python
false
false
2,636
py
30
0001_initial.py
20
0.538316
0.5239
0
65
39.553846
125
Leko25/SnapStitch
16,192,026,732,853
21d41e7d52101e5a98796de4baaf52a794caa8db
16c0fbf8bb17a0372dcb88c8c7ee5dd5f490a589
/server.py
d1f79b1004972e7d057931d377d5e173030aa696
[]
no_license
https://github.com/Leko25/SnapStitch
c40a83463f9828e91da977bcbc939f0168e6da25
d19b552b190767a854c2768f3697dceb811852c2
refs/heads/master
2021-09-25T03:59:23.094640
2018-02-18T22:12:58
2018-02-18T22:12:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import Flask, request, jsonify, render_template, abort, send_from_directory
from flask_cors import CORS, cross_origin
import time
import os
import random
import threading
from video_processing import main as video_processing

app = Flask(__name__)

ALLOWED_EXTENSIONS = set(["mp4"])

def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/results/<filename>')
def results(filename):
    # abort() raises, so no extra return is needed on the error path.
    if not os.path.isfile(os.path.join('results', 'finished_'+filename[:filename.rindex('.')]+'.txt')):
        abort(404)
    return send_from_directory('results', filename)

@app.route('/<groupid>')
def index(groupid):
    return render_template('index.html', groupid=groupid)

@app.route('/file/<groupid>')
def file(groupid):
    if os.path.isfile(os.path.join(os.getcwd(), 'results/{0}.m4v'.format(groupid))):
        return jsonify(groupid=groupid)
    else:
        abort(404)

@app.route('/add')
def add():
    return render_template('add.html')

@app.route('/loading/<groupid>')
def loading(groupid):
    return render_template('loading.html', groupid=groupid)

@app.route('/upload', methods=["POST"])
def upload():
    starttime = time.time()
    groupid = None
    if request.args.get('groupid'):
        groupid = request.args.get('groupid')
    else:
        groupid = (int(time.time()) * 1000) + random.randint(0, 999)
    f = request.files.getlist('vidfiles')
    savelocation = './videos/{0}'.format(groupid)
    if not os.path.exists(savelocation):
        os.makedirs(savelocation)
    for file in f:
        # Only accept whitelisted extensions; the original defined
        # allowed_file() but never called it.
        if allowed_file(file.filename):
            file.save(os.path.join(savelocation, file.filename.lower()))
    endtime = time.time()
    totaltime = endtime - starttime
    thread = threading.Thread(target=video_processing, args=(str(groupid),))
    thread.start()
    return jsonify(groupid=groupid, time=totaltime)

if __name__ == '__main__':
    app.run(debug=True, threaded=True, host="0.0.0.0", port=6006)
UTF-8
Python
false
false
1,882
py
17
server.py
9
0.703507
0.688629
0
64
28.40625
103
niksonbarth/sale
14,259,291,426,071
f980478e6fe6a284c38d4305d8db6ebfd13a122f
cf7300f764125f63452dee5e9aa78efcc3f58b3e
/catalog/admin.py
830dfc0eeee8b57952f66b2f07b47faf3f5e4241
[]
no_license
https://github.com/niksonbarth/sale
ade936c5ecaf292c85423b3abe2c626eb4a7b86b
08feb285de44c75ac3f00c51e0f9587969fab728
refs/heads/master
2021-05-10T18:10:49.597640
2018-02-15T09:50:39
2018-02-15T09:50:39
118,531,963
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding=utf-8 from django.contrib import admin from .models import Product, Category, SuperMarket, Ad class CategoryAdmin(admin.ModelAdmin): list_display = ['name', 'slug', 'created', 'modified'] search_fields = ['name', 'slug'] list_filter = ['created', 'modified'] class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'slug', 'category', 'created', 'modified'] search_fields = ['name', 'slug', 'category__name'] list_filter = ['created', 'modified'] class SuperMarketAdmin(admin.ModelAdmin): list_display = ['name', 'slug', 'city', 'created', 'modified'] search_fields = ['name', 'slug', 'city'] list_filter = ['created', 'modified'] class AdAdmin(admin.ModelAdmin): list_display = ['product', 'superMarket', 'price', 'created', 'modified'] search_fields = ['product__name', 'superMarket__name', 'price' ] list_filter = ['created', 'modified'] admin.site.register(Category, CategoryAdmin) admin.site.register(Product, ProductAdmin) admin.site.register(SuperMarket, SuperMarketAdmin) admin.site.register(Ad, AdAdmin)
UTF-8
Python
false
false
1,089
py
20
admin.py
14
0.675849
0.674931
0
36
29.25
77
shiroyagicorp/sitq
13,245,679,182,129
775362506507717cdf0d8838217a5b544424ab47
0001b9bcd0050f70f1a9fb5edf30a5f4a7324c2f
/sitq/tests/tests_sitq.py
dd8978d3c95ad0acd1929a42e0a641f61e28c9bc
[ "MIT" ]
permissive
https://github.com/shiroyagicorp/sitq
ef11e0841c1c169217db0f005fab8afc14d0603c
b2c0b321df8450ecfce1183c0f229e87f0661679
refs/heads/master
2022-02-07T00:03:11.750667
2019-02-25T17:21:50
2019-02-25T17:21:50
171,209,711
12
0
MIT
false
2022-01-21T19:50:50
2019-02-18T03:40:03
2021-07-16T07:43:42
2022-01-21T19:50:50
51
8
0
5
Python
false
false
import numpy as np from sitq import Sitq def test_sitq(): items = np.random.randn(10000, 50) queries = np.random.randn(100, 50) sitq = Sitq(signature_size=4).fit(items) assert sitq.get_item_signatures(items).shape == (10000, 4) assert sitq.get_query_signatures(queries).shape == (100, 4)
UTF-8
Python
false
false
310
py
9
tests_sitq.py
6
0.680645
0.606452
0
10
30
63
oanc/Galaxy
13,735,305,429,701
32153c59cf15d0ac84826d48bc5f382115d355b7
4a94c55bb070d383a9e933b65440e9c7a2277af0
/lib/galaxy/web/__init__.py
56a8ae9d4222cc5c88f1271e1a6597a8aa95a7fa
[ "CC-BY-3.0", "LicenseRef-scancode-unknown-license-reference", "CC-BY-2.5", "AFL-2.1", "AFL-3.0" ]
permissive
https://github.com/oanc/Galaxy
3272ec7e447af471ef146c813650258b555e0767
34dda03a5bfccb8eacc21f5ec601e577c96bbf7e
refs/heads/master
2021-01-18T21:08:05.254586
2016-11-13T16:41:48
2016-11-13T16:41:48
33,634,998
0
1
null
false
2015-06-03T16:08:27
2015-04-08T22:13:01
2015-04-13T22:24:58
2015-06-03T16:08:26
54,601
0
1
0
Python
null
null
""" The Galaxy web application framework """ from framework import url_for from framework.decorators import error from framework.decorators import expose from framework.decorators import json from framework.decorators import json_pretty from framework.decorators import require_login from framework.decorators import require_admin from framework.decorators import expose_api from framework.decorators import expose_api_anonymous from framework.decorators import expose_api_raw from framework.decorators import expose_api_raw_anonymous # TODO: Drop and make these the default. from framework.decorators import _future_expose_api from framework.decorators import _future_expose_api_anonymous from framework.decorators import _future_expose_api_raw from framework.decorators import _future_expose_api_raw_anonymous from framework.decorators import _future_expose_api_anonymous_and_sessionless from framework.decorators import _future_expose_api_raw_anonymous_and_sessionless from framework.formbuilder import form from framework.formbuilder import FormBuilder from framework.base import httpexceptions __all__ = ['url_for', 'error', 'expose', 'json', 'json_pretty', 'require_admin', 'require_login', 'expose_api', 'expose_api_anonymous', 'expose_api_raw', 'expose_api_raw_anonymous', '_future_expose_api', '_future_expose_api_anonymous', '_future_expose_api_raw', '_future_expose_api_raw_anonymous', '_future_expose_api_anonymous_and_sessionless', '_future_expose_api_raw_anonymous_and_sessionless', 'form', 'FormBuilder', 'httpexceptions']
UTF-8
Python
false
false
1,620
py
28
__init__.py
13
0.768519
0.768519
0
37
42.783784
82
masyud303/histogramspecification
8,778,913,196,333
ffaeb97e086f18dcc6a2003767e435848a78a2d4
f6d1c5c7bc98d6868feb92106d44c8b909d4d046
/histogramspecification.py
5903f0edc3829a1a75ff067ed1ed74155cb61a51
[]
no_license
https://github.com/masyud303/histogramspecification
c33939e0cfce0429bd2a386acd8ed0fdad349e1e
144bb0c6ca9d2cd92f0509cafcb3dd9bd16e5913
refs/heads/master
2022-12-06T23:51:16.173431
2020-08-02T07:21:08
2020-08-02T07:21:08
284,411,667
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Created on Tue August 27 09:42:57 2019 @coded by: yudhiprabowo """ import cv2 import numpy as np import calculation as calc inp = "D:\\PYTHON\\input.jpg" ref = "D:\\PYTHON\\reference.jpg" out = "D:\\PYTHON\\output.jpg" iarr = cv2.imread(inp) rarr = cv2.imread(ref) irow, icol, iband = iarr.shape rrow, rcol, rband = rarr.shape oarr = np.asarray(calc.histmatch(iarr, rarr, irow, icol, rrow, rcol, 3)) cv2.imwrite(out, oarr)
UTF-8
Python
false
false
453
py
2
histogramspecification.py
1
0.660044
0.622517
0
21
19.571429
72
edcross/knn
4,844,723,111,282
cbce247ada1c7951408c2a92b1bc312762fbe2c3
79902680f3466ad0ac13157f9111c5d85920eede
/find.py
d69a4976f3fc168ba6a61af0dff907d1285672ba
[]
no_license
https://github.com/edcross/knn
21c623177d75f599c2e8ba7bb312aa9851ac8804
f568e6b64f01bd6b64bf85bdc1ad1660f8a7f0cd
refs/heads/master
2021-05-12T15:13:34.251999
2018-02-02T14:40:22
2018-02-02T14:40:22
116,978,413
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#import h5py
import numpy as np
from annoy import AnnoyIndex
import random
import csv
import os,sys
#--------------
# One vote slot per category (10 categories, 0-indexed).
c = [0,0,0,0,0,0,0,0,0,0]

def IndexFinding(inde):
    """Look up the category of a sample index in Index_Cat.csv."""
    cat = int(0)
    with open('Index_Cat.csv') as csvfile:
        readCSV = csv.reader(csvfile, delimiter=';')
        for index in readCSV:
            if int(index[0]) == inde:
                cat = int(index[1])
                #print (index[0] + "-" + index[1] + "-" + str(inde))
                break
    return cat
#--------------
def MasVotado(n):
    """Add one vote for category n (0-indexed) and return the tally."""
    # The original version branched on the category number, but every branch
    # performed the same increment -- and the branch for category 3
    # incremented slot n-1 instead of n, skewing the vote. A single
    # unconditional increment is equivalent and fixes that bug.
    c[n] = c[n] + 1
    return c
#---------------
#---/Split string by row/---
features = ""
u = AnnoyIndex(2048)
u.load('test.ann')
resultado = []
categoriaPred = int(0)
with open('TestData.csv') as csvfile:
    readCSV = csv.reader(csvfile, delimiter='\"')
    for row in readCSV:
        c = [0,0,0,0,0,0,0,0,0,0]
        contador = []
        index = row[0].split(",")
        categoria = index[1]
        indice = int(index[0])
        features = row[1].split(",")
        caracteristica = [float(feature) for feature in features]
        # Find the 5 nearest neighbours of this feature vector.
        resultado = u.get_nns_by_vector(caracteristica, 5)
        print("vector: ")
        print(resultado)
        for dato in resultado:
            categoriaPred = int(IndexFinding(dato))
            contador = MasVotado(int(categoriaPred - 1))
            print(dato)
        #print("GT: " + str(categoria))
        print("Pred: ")
        winner = np.argmax(contador)
        print(winner)
        print(contador)
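# The hand-rolled vote tally above can also be expressed with
# collections.Counter; a sketch of the same majority vote over the
# neighbour categories:
from collections import Counter

def predict_category(neighbour_categories):
    # most_common(1) returns [(category, votes)] for the top category
    return Counter(neighbour_categories).most_common(1)[0][0]

# e.g. predict_category([IndexFinding(d) for d in resultado])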
UTF-8
Python
false
false
3,072
py
10
find.py
10
0.385742
0.363607
0
116
25.439655
83
luamfmenezes/python-opencv-course
19,112,604,486,808
e10633ccaf3470c6b07b2c666c09c430fb0e9cdf
460a2f0b2d11163d1dcff2e10671d45c704e65d4
/24-WatershedSeeds.py
7d26bbdb6198224626ea9d5e6686e709cd51af8f
[]
no_license
https://github.com/luamfmenezes/python-opencv-course
0ee22b3bb9808c23efe998ee17c5f8642496cadb
b9aee4558f0b468c9ae47f9a9dd1e1a3e938bed4
refs/heads/master
2023-02-04T15:51:13.018078
2020-12-14T00:35:38
2020-12-14T00:35:38
318,673,852
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np from matplotlib import cm import cv2 # ------------------------------------------------------- WaterShed road = cv2.imread('assets/images/road_image.jpg') roady_copy = np.copy(road) road_shape = road.shape[:2] marker_image = np.zeros(road_shape,dtype=np.int32) segments = np.zeros(road.shape,dtype=np.uint8) def create_rgb(i): return tuple(np.array(cm.tab10(i)[:3]) * 255) colors = [] for i in range(10): colors.append(create_rgb(i)) num_markers = 10 current_marker = 1 marks_updated = False def mouse_callback(event,x,y,flags,param): global marks_updated if event == cv2.EVENT_LBUTTONDOWN: # Markaers passed to the water cv2.circle(marker_image,(x,y),10,(current_marker),-1) #User sees on the road Image cv2.circle(roady_copy,(x,y),10,colors[current_marker],-1) marks_updated = True cv2.namedWindow('Road Image') cv2.setMouseCallback('Road Image',mouse_callback) while True: cv2.imshow('WaterShed Segments',segments) cv2.imshow('Road Image',roady_copy) # Close all windows key = cv2.waitKey(1) if key == 27: break # clearing all the colors presing key 'c' elif key == ord('c'): roady_copy = road.copy() marker_image = np.zeros(road_shape,dtype=np.int32) segments = np.zeros(road.shape,dtype=np.uint8) # update color choice elif key > 0 and chr(key).isdigit(): current_marker = int(chr(key)) # update the markings elif marks_updated: marker_image_copy = marker_image.copy() cv2.watershed(road,marker_image_copy) segments = np.zeros(road.shape,dtype=np.uint8) for color_ind in range(num_markers): segments[marker_image_copy == (color_ind)] = colors[color_ind] marks_updated=False cv2.destroyAllWindows()
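# cv2.watershed labels each pixel of the marker array with the id of its
# basin and writes -1 on boundary pixels. A small helper (sketch) for
# painting just those boundaries onto a copy of an image:
def draw_watershed_boundaries(image, markers):
    outlined = image.copy()
    outlined[markers == -1] = (0, 0, 255)  # boundary pixels in red (BGR)
    return outlined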
UTF-8
Python
false
false
1,857
py
32
24-WatershedSeeds.py
30
0.626279
0.6042
0
81
21.938272
75
spaethju/transmembrane-identifier
3,109,556,352,450
12a7e884e30d129c9aca7cef492844e89cfb9dbc
c9cb6c3a08587b4d05385dc343a2c046e9fd9854
/src/membrane_orientation/pdb_io.py
12d54ce637db4f558a44307cf974dbd8ab8068ce
[]
no_license
https://github.com/spaethju/transmembrane-identifier
3208479ab48a64d234d7dcb526fc5bf3d390bc5f
3a4ab97bd6a61fc76b6684dcc5117a0476d2fdf9
HEAD
2018-10-06T17:35:37.691387
2018-06-26T08:21:51
2018-06-26T08:21:51
133,693,999
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from Bio.PDB import * from Bio.PDB.PDBExceptions import PDBConstructionWarning import numpy as np import warnings def parse_PDB(filepath): """ Parses a PDB file from a given filepath using PDB Parser. Will check for Pilus & Virus proteins as well as DNA/RNA containing proteins. If a PDB file contains multiple models, the user is prompted to select one. :param filepath: Path to the PDB file :return: PDBParser model of the Protein """ pdb_parser = PDBParser() # Actual parsing with warnings.catch_warnings(): warnings.simplefilter('ignore', PDBConstructionWarning) structure = pdb_parser.get_structure('protein', filepath) # Check for Pilus / Virus / DNA / RNA filter_words_name = ['pilus', 'pili', 'Pilus', 'Pili', 'PILUS', 'PILI', 'virus', 'Virus', 'VIRUS'] filter_words_head = ['dna', 'DNA', 'rna', 'RNA'] if any(word in structure.header['name'] for word in filter_words_name) or \ any(word in structure.header['head'] for word in filter_words_head): print('Given structure is either a Pilus or Virus protein or contains nucleotides.') print('Will not be analysed.') return None # Select model from structure models = [model for model in structure] if len(models) > 1: model_number = input(str(len(models))+" models found. Please enter model id (1-index): ") model = models[int(model_number)-1] else: model = models[0] return model def position_model(model): """ Moves a PDBParser model to its center. :param model: PDBParser model :return: PDBParser model, with adjusted coordinates """ total_vector = np.array([0, 0, 0]) count = 0 for chain in model: for residue in chain: for atom in residue: count += 1 total_vector = total_vector + atom.coord previous_position = total_vector/count model.transform(rot=rotaxis2m(theta=0, vector=Vector(1, 0, 0)), tran=-previous_position) return model, previous_position #model = parse_PDB('./structures/pdb1u9j.pdb') #model = position_model(model)
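# position_model() is plain mean-centering: subtract the centroid so it moves
# to the origin. The same arithmetic on a bare NumPy array (illustration):
def center_coords(coords):
    centroid = coords.mean(axis=0)
    return coords - centroid  # the centroid of the result is (0, 0, 0)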
UTF-8
Python
false
false
2,148
py
2,327
pdb_io.py
12
0.653631
0.646182
0
61
34.229508
102
Naganandhinisri/python-beginner
9,431,748,202,400
b054e67591020aecb4dab8fe5c3a5f6e4367a5ed
2672ad4d130a01fe81e3291558babd8da23a997f
/hacker_rank/swapcase.py
cf79919567a6a7efa34c8bc3844e09203c35462f
[]
no_license
https://github.com/Naganandhinisri/python-beginner
04d6a577ccb5a1d847ee302c310d866111a37b8a
0cfa19077dfaf2f74f39c81062ee675e5659e02e
refs/heads/master
2020-04-19T04:11:59.041153
2019-03-29T04:37:19
2019-03-29T04:37:19
167,956,299
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def swap(): s = input("enter the word") word = s.swapcase() print(word) while True: swap()
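# str.swapcase flips the case of every cased character, e.g.:
# "Hello World".swapcase() == "hELLO wORLD"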
UTF-8
Python
false
false
106
py
99
swapcase.py
99
0.566038
0.566038
0
6
16.833333
31
robot-lab/rcs-business-layer
1,056,561,989,950
8eba4ab2004cf8d8bcfc3dffbbddc6158a86b943
7aedc086612772f4e514fadf7ad3c80439ea4910
/Planner/planner.py
431f73bc25d27387b2bdfc0e4ba7cb7aa6daba06
[ "Apache-2.0", "MIT" ]
permissive
https://github.com/robot-lab/rcs-business-layer
78a37cbda0739ff44c8b85ee0e8a8fbb663972e4
a312b92e71f17b3a02853d3fa55821887b4b24f2
refs/heads/master
2018-11-25T10:21:46.836491
2018-11-25T07:43:47
2018-11-25T07:43:47
141,130,975
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging
import json
import time


# ATTENTION! Before use this class configure your logging.
class Planner:
    SPECIAL_SYMBOL = '$'
    CONCAT_SYMBOL = '+'
    SEPARATED_SYMBOL = '!'
    EPS = 2000

    def __init__(self, sock_rob_ad, sock_scene3d, robo_dict, buffer_size):
        self.ROBO_DICT = robo_dict
        self.RD = {'f': 'fanuc_world', 't': 'telega'}
        self.BUFFER_SIZE = buffer_size
        self.sock_rob_ad = sock_rob_ad
        self.sock_scene3d = sock_scene3d

    def get_all_data_from_scene3d(self):
        self.sock_scene3d.send(b'get_scene')
        message_from_scene3d = \
            self.sock_scene3d.recv(self.BUFFER_SIZE).decode()
        return json.loads(message_from_scene3d)

    def data_convert_json_to_str_byte(self, command, receiver,
                                      default_robot_with_sensors='f'):
        if command == 'sensors':
            # WARNING: make sure that robot with further ID has sensors!
            command = default_robot_with_sensors
        data_str_byte = f'{receiver}: {command}|'.encode()
        print(data_str_byte)
        return data_str_byte

    def is_exist_in_scene3d(self, object_name):
        data_from_scene3d = self.get_all_data_from_scene3d()
        return object_name in data_from_scene3d

    def try_get_data_from_sensors(self, receiver, object_name, checks_number=3,
                                  time_delay=3):
        print('Trying to find', object_name, 'in', receiver)
        for _ in range(checks_number):
            if self.is_exist_in_scene3d(object_name):
                return True
            self.sock_rob_ad.send(
                self.data_convert_json_to_str_byte('sensors', receiver)
            )
            time.sleep(time_delay)
        return False

    def find_parameter(self, command, symbol=SPECIAL_SYMBOL):
        parameter_begin = command.find(symbol)
        if parameter_begin != -1:
            parameter_end = command.find(symbol, parameter_begin + 1)
            if parameter_end == -1:
                raise ValueError(f'Did not find end of parameter in command: '
                                 f'{command}.')
            return command[parameter_begin:parameter_end + 1]
        return None

    def get_parameter_from_scene3d(self, parameter):
        # Fixed: this helper was defined without `self`, so it could neither
        # reach the scene3d socket nor be resolved by its callers.
        self.sock_scene3d.send(f'get {parameter}'.encode())
        data_from_scene_3d = self.sock_scene3d.recv(self.BUFFER_SIZE).decode()
        return data_from_scene_3d

    def get_data_and_replace_parameter(self, command, receiver, parameter):
        # Try to get the object from scene 3d the first time.
        data_from_scene_3d = self.get_parameter_from_scene3d(parameter)
        if data_from_scene_3d == 'None':
            # Calling sensors several times.
            if not self.try_get_data_from_sensors(receiver, parameter):
                return None
            # After calling sensors, try to get the object again.
            data_from_scene_3d = self.get_parameter_from_scene3d(parameter)
            if data_from_scene_3d == 'None':
                raise ValueError(f'Did not find parameter in scene 3d: '
                                 f'{parameter}.')
        new_command = command.replace(parameter, data_from_scene_3d)
        return self.add_offset(new_command, data_from_scene_3d)

    def add_offset(self, command, data_from_scene_3d,
                   concat_symbol=CONCAT_SYMBOL,
                   separated_symbol=SEPARATED_SYMBOL, command_offset=None):
        # Find symbol for command with offset.
        con_pos = command.find(concat_symbol)
        if con_pos == -1:
            return command
        # Find command coordinate.
        sep_pos = command.find(separated_symbol)
        if sep_pos == -1:
            raise ValueError(f'Did not find control symbol at the end of the '
                             f'command: {command}')
        # Skip space symbols: con_pos + 2 and sep_pos - 1.
        data_to_add = command[con_pos + 2:sep_pos - 1]
        coords = [str(float(x) + float(y)) for x, y in zip(
            data_from_scene_3d.split(' '),
            data_to_add.split(' ')
        )]
        if command_offset is None:
            # Get the command literal (it is placed at the beginning) with its
            # space symbol, then append the summed coordinates at the end.
return command[:2] + ' '.join(coords) + command[sep_pos + 1:] raise NotImplementedError('Need to add processing for ' 'non-standard offset') def get_data_from_scene_and_compare(self, sent_command, receiver): # Get all data from scene3d. data_from_scene3d = self.get_all_data_from_scene3d() # Remove all blank chars and last parameter. coords_to_check = sent_command.strip()[1:-1].strip() print(f"coords to check {coords_to_check}") if self.RD[receiver] in data_from_scene3d: # Remove all blank chars. coords_for_check = data_from_scene3d[self.RD[receiver]].strip() print(f"coords for check {coords_for_check}") if coords_to_check == coords_for_check: return True for c, r in set(zip(coords_to_check.split(' '), coords_for_check.split(' '))): if abs(abs(float(c)) - abs(float(r))) > self.EPS: print(float(c)) print(float(r)) return False return True return None def check_command_execution(self, sent_command, receiver): if 'm' not in sent_command: return True result = self.get_data_from_scene_and_compare(sent_command, receiver) if result is not None: if not result: self.sock_scene3d.send(b'set "status": "error_command"') print("Status: error_command") logging.info("Status: error_command") return False self.sock_scene3d.send(b'set "status": "ok"') print("Status: ok") logging.info("Status: ok") return True self.sock_scene3d.send(b'set "status": "error_object"') print("Status: error_object") logging.info("Status: error_object") return False def check_execution_with_delay(self, sent_command, receiver, checks_number=3, time_delay=1): # Check command execution with some delays. if 'm' in sent_command: is_executed = False for j in range(checks_number): if self.check_command_execution(sent_command, receiver): is_executed = True break time.sleep(time_delay * j + time_delay) if not is_executed: print(f"Error: {self.RD[receiver]} in {sent_command}") return False return True def process_simple_task(self, task, task_loader, save_task=True): if save_task: task_loader.save_task(task) result_status = True i = 0 command_number = len(task['Scenario']) while i < command_number: time_1 = int(task['Scenario'][i].get('time')) receiver_1 = task['Scenario'][i].get('name') command_1 = task['Scenario'][i].get('command') # Find parameter in command, try to replace it to data from # scene3d. parameter_name_1 = self.find_parameter(command_1) if parameter_name_1 is not None: command_1 = self.get_data_and_replace_parameter( command_1, receiver_1, parameter_name_1 ) if not command_1: result_status = False break # Imitation of parallel work. Need to improve this piece of code. 
            if task['Scenario'][i].get('parallel') == "true" and \
                    i + 1 < command_number:
                self.sock_rob_ad.send(
                    self.data_convert_json_to_str_byte(command_1, receiver_1)
                )
                print('Send to', receiver_1, 'command:', command_1)

                receiver_2 = task['Scenario'][i + 1].get('name')
                command_2 = task['Scenario'][i + 1].get('command')
                parameter_name_2 = self.find_parameter(command_2)
                if parameter_name_2 is not None:
                    # Fixed: the second command belongs to receiver_2; the
                    # original passed receiver_1 here (copy-paste slip).
                    command_2 = self.get_data_and_replace_parameter(
                        command_2, receiver_2, parameter_name_2
                    )
                self.sock_rob_ad.send(
                    self.data_convert_json_to_str_byte(command_2, receiver_2)
                )
                print('Send to', receiver_2, 'command:', command_2)

                time_2 = int(task['Scenario'][i + 1].get('time'))
                time.sleep(max(time_1, time_2))

                check_1 = self.check_command_execution(command_1, receiver_1)
                check_2 = self.check_command_execution(command_2, receiver_2)
                if not check_1:
                    print(f"Error: {self.RD[receiver_1]} in {command_1}")
                    result_status = False
                    break
                if not check_2:
                    print(f"Error: {self.RD[receiver_2]} in {command_2}")
                    result_status = False
                    break
                i += 1
            else:
                self.sock_rob_ad.send(
                    self.data_convert_json_to_str_byte(command_1, receiver_1)
                )
                print('Send to', receiver_1, 'command:', command_1)
                time.sleep(time_1)
                if not self.check_execution_with_delay(command_1, receiver_1):
                    result_status = False
                    break
                i += 1
        return result_status

    def process_complex_task(self, task, task_loader):
        for scenario_task in task['Scenario']:
            print('Task name:', scenario_task.get('command'))
            # Load tasks from loader and process it as simple task.
            if len(scenario_task.get('command').split(' ')) == 1 and \
                    scenario_task.get('command') != 'f':
                simple_task = task_loader.load_task(
                    scenario_task.get('command')
                )
                if not self.process_simple_task(
                        simple_task,
                        task_loader,
                        save_task=bool(scenario_task.get('name'))
                ):
                    break
            else:
                self.process_simple_task(
                    task,
                    task_loader,
                    save_task=bool(task.get('name'))
                )
                break
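# Worked illustration of add_offset() on a made-up command string (the exact
# wire format here is an assumption for illustration, not taken from this
# repo): after parameter substitution a command might read
#     'm 1.0 2.0 3.0 + 0.5 0.0 0.0 !'
# con_pos locates '+', sep_pos locates '!', and the two coordinate triples
# are summed pairwise:
#     >>> [str(float(x) + float(y))
#     ...  for x, y in zip('1.0 2.0 3.0'.split(' '), '0.5 0.0 0.0'.split(' '))]
#     ['1.5', '2.0', '3.0']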
UTF-8
Python
false
false
10,742
py
6
planner.py
4
0.53342
0.522342
0
278
37.640288
79
mountaindude/icon-font-to-png
9,706,626,122,339
b8d75b657a880e420e78c53e5a547a677ca34dbd
8984f544709d74b84fa8f8826cd666ac370e26bd
/icon_font_to_png/test/test_icon_font_downloader.py
4a71517f173c05f804e1a24a421865320626f77c
[ "MIT", "Python-2.0" ]
permissive
https://github.com/mountaindude/icon-font-to-png
6a39ca618ed749831a3b494995198e112ca27751
c5546e899a034edfe5aa36de29dd8c0020350955
refs/heads/master
2022-07-17T21:41:58.559367
2020-05-20T04:44:50
2020-05-20T04:44:50
265,045,329
0
0
MIT
true
2020-05-18T19:47:59
2020-05-18T19:47:58
2020-05-17T02:56:02
2020-05-12T20:19:33
307
0
0
0
null
false
false
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import os import tempfile import pytest from flaky import flaky from icon_font_to_png.icon_font_downloader import ( FontAwesomeDownloader, OcticonsDownloader ) # Tests @flaky @pytest.mark.parametrize("downloader", [ FontAwesomeDownloader, OcticonsDownloader, ]) def test_icon_font_downloader(downloader): """Test initializing Font Awesome Downloader""" # With directory obj = downloader(tempfile.mkdtemp()) obj.download_files() assert os.path.isfile(obj.css_path) assert os.path.isfile(obj.ttf_path) # Without directory obj = downloader() obj.download_files() assert os.path.isfile(obj.css_path) assert os.path.isfile(obj.ttf_path) @pytest.mark.parametrize("downloader", [ FontAwesomeDownloader, OcticonsDownloader, ]) def test_font_awesome_latest_version_number(downloader): """Test that getting latest version number""" obj = downloader(tempfile.mkdtemp()) assert obj.get_latest_version_number()
UTF-8
Python
false
false
1,065
py
16
test_icon_font_downloader.py
10
0.71831
0.717371
0
45
22.666667
56
ANierbeck/WeConnect-python
4,002,909,530,160
15f864dc9eb8656b610bf492a44b7b705b00fe9a
93a22af9eecf565334dc4f2a4502ccb46c9e248f
/weconnect/elements/charging_station.py
326735db775a2d69dcfee4485124e36f238dd8f6
[ "MIT", "Python-2.0" ]
permissive
https://github.com/ANierbeck/WeConnect-python
00639fe4a5435b7213315e6f135734c2e8be6b6f
993225fe261b1c03aa578ed1a5adbe37ba4737dc
refs/heads/main
2023-07-11T14:49:02.013137
2021-08-11T08:34:17
2021-08-11T08:35:42
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging from enum import Enum from weconnect.util import toBool from weconnect.addressable import AddressableObject, AddressableAttribute, AddressableList LOG = logging.getLogger("weconnect") class ChargingStation(AddressableObject): # pylint: disable=too-many-instance-attributes def __init__( self, weConnect, stationId, parent, fromDict, fixAPI=True, ): self.weConnect = weConnect super().__init__(localAddress=stationId, parent=parent) self.id = AddressableAttribute(localAddress='id', parent=self, value=None, valueType=str) self.name = AddressableAttribute(localAddress='name', parent=self, value=None, valueType=str) self.latitude = AddressableAttribute(localAddress='latitude', parent=self, value=None, valueType=float) self.longitude = AddressableAttribute(localAddress='longitude', parent=self, value=None, valueType=float) self.distance = AddressableAttribute(localAddress='distance', parent=self, value=None, valueType=float) self.address = None self.chargingPower = AddressableAttribute(localAddress='chargingPower', parent=self, value=None, valueType=float) self.chargingSpots = AddressableList(localAddress='chargingSpots', parent=self) self.authTypes = AddressableList(localAddress='authTypes', parent=self) self.filteredOut = AddressableAttribute(localAddress='filteredOut', parent=self, value=None, valueType=bool) self.isFavorite = AddressableAttribute(localAddress='isFavorite', parent=self, value=None, valueType=bool) self.operator = None self.isWeChargePartner = AddressableAttribute(localAddress='isWeChargePartner', parent=self, value=None, valueType=bool) self.fixAPI = fixAPI self.update(fromDict) def update( # noqa: C901 # pylint: disable=too-many-branches self, fromDict=None, ): if fromDict is not None: LOG.debug('Create / update charging station') if 'id' in fromDict: self.id.setValueWithCarTime(fromDict['id'], lastUpdateFromCar=None, fromServer=True) else: self.id.enabled = False if 'name' in fromDict: self.name.setValueWithCarTime(fromDict['name'], lastUpdateFromCar=None, fromServer=True) else: self.name.enabled = False if 'latitude' in fromDict: self.latitude.setValueWithCarTime(float(fromDict['latitude']), lastUpdateFromCar=None, fromServer=True) else: self.latitude.enabled = False if 'longitude' in fromDict: self.longitude.setValueWithCarTime(float(fromDict['longitude']), lastUpdateFromCar=None, fromServer=True) else: self.longitude.enabled = False if 'distance' in fromDict: self.distance.setValueWithCarTime(float(fromDict['distance']), lastUpdateFromCar=None, fromServer=True) else: self.distance.enabled = False if 'address' in fromDict: if self.address is None: self.address = ChargingStation.Address(localAddress='address', parent=self, fromDict=fromDict['address']) else: self.address.update(fromDict=fromDict['address']) elif self.address is not None: self.address.enabled = False self.address = None if 'chargingPower' in fromDict: self.chargingPower.setValueWithCarTime(float(fromDict['chargingPower']), lastUpdateFromCar=None, fromServer=True) else: self.chargingPower.enabled = False if 'chargingSpots' in fromDict and fromDict['chargingSpots'] is not None: if len(fromDict['chargingSpots']) == len(self.chargingSpots): for i, spot in enumerate(fromDict['chargingSpots']): self.chargingSpots[i].update(fromDict=spot) else: self.chargingSpots.clear() for spot in fromDict['chargingSpots']: self.chargingSpots.append(ChargingStation.ChargingSpot(localAddress=str( len(self.chargingSpots)), parent=self.chargingSpots, fromDict=spot)) else: self.chargingSpots.enabled = False 
self.chargingSpots.clear() if 'authTypes' in fromDict and fromDict['authTypes'] is not None: if len(fromDict['authTypes']) == len(self.authTypes): for i, authType in enumerate(fromDict['authTypes']): try: authTypeEnum = ChargingStation.AUTHTYPE(authType) except ValueError: authTypeEnum = ChargingStation.AUTHTYPE.UNKNOWN LOG.warning('An unsupported type: %s was provided, please report this as a bug', authTypeEnum) self.authTypes[i].setValueWithCarTime(authTypeEnum, lastUpdateFromCar=None, fromServer=True) else: self.authTypes.clear() for authType in fromDict['authTypes']: try: authTypeEnum = ChargingStation.AUTHTYPE(authType) except ValueError: authTypeEnum = ChargingStation.AUTHTYPE.UNKNOWN LOG.warning('An unsupported type: %s was provided, please report this as a bug', authTypeEnum) self.authTypes.append(AddressableAttribute(localAddress=len(self.authTypes), parent=self.authTypes, value=authTypeEnum, valueType=ChargingStation.AUTHTYPE)) else: self.authTypes.enabled = False self.authTypes.clear() if 'filteredOut' in fromDict: self.filteredOut.setValueWithCarTime(toBool(fromDict['filteredOut']), lastUpdateFromCar=None, fromServer=True) else: self.filteredOut.enabled = False if 'isFavorite' in fromDict: self.isFavorite.setValueWithCarTime(toBool(fromDict['isFavorite']), lastUpdateFromCar=None, fromServer=True) else: self.isFavorite.enabled = False if 'isWeChargePartner' in fromDict: self.isWeChargePartner.setValueWithCarTime(toBool(fromDict['isWeChargePartner']), lastUpdateFromCar=None, fromServer=True) else: self.isWeChargePartner.enabled = False if 'cpoiOperatorInfo' in fromDict: if self.operator is None: self.operator = ChargingStation.Operator(localAddress='operator', parent=self, fromDict=fromDict['cpoiOperatorInfo']) else: self.operator.update(fromDict=fromDict['cpoiOperatorInfo']) elif self.operator is not None: self.operator.enabled = False self.operator = None for key, value in {key: value for key, value in fromDict.items() if key not in ['id', 'name', 'latitude', 'longitude', 'distance', 'address', 'chargingPower', 'chargingSpots', 'authTypes', 'filteredOut', 'isFavorite', 'isWeChargePartner', 'cpoiOperatorInfo']}.items(): LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value) class AUTHTYPE(Enum): RFID = 'RFID' APP = 'APP' QR = 'QR' NO_AUTH = 'NO_AUTH' UNKNOWN = 'UNKNOWN' class Address(AddressableObject): def __init__( self, localAddress, parent, fromDict=None, ): super().__init__(localAddress=localAddress, parent=parent) self.city = AddressableAttribute(localAddress='city', parent=self, value=None, valueType=str) self.country = AddressableAttribute(localAddress='country', parent=self, value=None, valueType=str) self.postcode = AddressableAttribute(localAddress='postcode', parent=self, value=None, valueType=str) self.street = AddressableAttribute(localAddress='street', parent=self, value=None, valueType=str) if fromDict is not None: self.update(fromDict) def update(self, fromDict): LOG.debug('Update address from dict') if 'city' in fromDict: self.city.setValueWithCarTime(fromDict['city'], lastUpdateFromCar=None, fromServer=True) else: self.city.enabled = False if 'country' in fromDict: self.country.setValueWithCarTime(fromDict['country'], lastUpdateFromCar=None, fromServer=True) else: self.country.enabled = False if 'postcode' in fromDict: self.postcode.setValueWithCarTime(fromDict['postcode'], lastUpdateFromCar=None, fromServer=True) else: self.postcode.enabled = False if 'street' in fromDict: self.street.setValueWithCarTime(fromDict['street'], 
                                          lastUpdateFromCar=None, fromServer=True)
            else:
                self.street.enabled = False

            for key, value in {key: value for key, value in fromDict.items()
                               if key not in ['city', 'country', 'postcode', 'street']}.items():
                LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value)

        def __str__(self):
            returnString = ''
            if self.street.enabled:
                returnString += f'{self.street.value}, '
            if self.postcode.enabled:
                returnString += f'{self.postcode.value} '
            if self.city.enabled:
                returnString += f'{self.city.value}, '
            if self.country.enabled:
                returnString += f'{self.country.value}'
            return returnString

    class ChargingSpot(AddressableObject):
        def __init__(
            self,
            localAddress,
            parent,
            fromDict=None,
        ):
            super().__init__(localAddress=localAddress, parent=parent)
            self.connectors = AddressableList(localAddress='connectors', parent=self)
            self.available = AddressableAttribute(localAddress='available', parent=self, value=None,
                                                  valueType=ChargingStation.ChargingSpot.AVAILABILITY)
            self.chargingPower = AddressableAttribute(localAddress='chargingPower', parent=self, value=None,
                                                      valueType=float)

            if fromDict is not None:
                self.update(fromDict)

        def update(self, fromDict):
            LOG.debug('Update charging spot from dict')

            if 'connectors' in fromDict and fromDict['connectors'] is not None:
                if len(fromDict['connectors']) == len(self.connectors):
                    for i, connector in enumerate(fromDict['connectors']):
                        self.connectors[i].update(fromDict=connector)
                else:
                    self.connectors.clear()
                    for connector in fromDict['connectors']:
                        self.connectors.append(ChargingStation.ChargingSpot.Connector(
                            localAddress=str(len(self.connectors)), parent=self.connectors, fromDict=connector))
            else:
                self.connectors.enabled = False
                self.connectors.clear()

            if 'chargingPower' in fromDict:
                self.chargingPower.setValueWithCarTime(float(fromDict['chargingPower']),
                                                       lastUpdateFromCar=None, fromServer=True)
            else:
                self.chargingPower.enabled = False

            if 'available' in fromDict:
                try:
                    self.available.setValueWithCarTime(ChargingStation.ChargingSpot.AVAILABILITY(fromDict['available']),
                                                       lastUpdateFromCar=None, fromServer=True)
                except ValueError:
                    self.available.setValueWithCarTime(ChargingStation.ChargingSpot.AVAILABILITY.UNKNOWN,
                                                       lastUpdateFromCar=None, fromServer=True)
                    LOG.warning('An unsupported type: %s was provided,'
                                ' please report this as a bug', fromDict['available'])
            else:
                self.available.enabled = False

            for key, value in {key: value for key, value in fromDict.items()
                               if key not in ['connectors', 'chargingPower', 'available', 'plugTypes']}.items():
                LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value)

        def __str__(self):
            returnString = ''
            if self.available.enabled:
                returnString += f'Availability: {self.available.value.value}\n'  # pylint: disable=no-member
            if self.chargingPower.enabled:
                returnString += f'Max. Charging Power: {self.chargingPower.value}kW\n'
            returnString += f'Connectors: {len(self.connectors)} items\n'
            for connector in self.connectors:
                returnString += ''.join(['\t' + line for line in str(connector).splitlines(True)]) + '\n'
            return returnString

        class PlugType(Enum):
            TYPE1 = 'Type1'
            TYPE2 = 'Type2'
            CHADEMO = 'CHAdeMO'
            CCS = 'CCS'
            SCHUKO = 'Schuko'
            CEE3 = 'CEE3'
            UNKNOWN = 'unknown'

        class AVAILABILITY(Enum):
            AVAILABLE = 'AVAILABLE'
            OCCUPIED = 'OCCUPIED'
            UNKNOWN = 'UNKNOWN'

        class Connector(AddressableObject):
            def __init__(
                self,
                localAddress,
                parent,
                fromDict=None,
            ):
                super().__init__(localAddress=localAddress, parent=parent)
                self.plugType = AddressableAttribute(localAddress='plugType', parent=self, value=None,
                                                     valueType=ChargingStation.ChargingSpot.PlugType)
                self.chargingPower = AddressableAttribute(localAddress='chargingPower', parent=self, value=None,
                                                          valueType=float)

                if fromDict is not None:
                    self.update(fromDict)

            def update(self, fromDict):
                LOG.debug('Update connector from dict')

                if 'plugType' in fromDict:
                    try:
                        self.plugType.setValueWithCarTime(ChargingStation.ChargingSpot.PlugType(fromDict['plugType']),
                                                          lastUpdateFromCar=None, fromServer=True)
                    except ValueError:
                        self.plugType.setValueWithCarTime(ChargingStation.ChargingSpot.PlugType.UNKNOWN,
                                                          lastUpdateFromCar=None, fromServer=True)
                        LOG.warning('An unsupported type: %s was provided,'
                                    ' please report this as a bug', fromDict['plugType'])
                else:
                    self.plugType.enabled = False

                if 'chargingPower' in fromDict:
                    self.chargingPower.setValueWithCarTime(float(fromDict['chargingPower']),
                                                           lastUpdateFromCar=None, fromServer=True)
                else:
                    self.chargingPower.enabled = False

                for key, value in {key: value for key, value in fromDict.items()
                                   if key not in ['plugType', 'chargingPower']}.items():
                    LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value)

            def __str__(self):
                returnString = ''
                if self.plugType.enabled:
                    returnString += f'Plug Type: {self.plugType.value.value}\n'  # pylint: disable=no-member
                if self.chargingPower.enabled:
                    returnString += f'Max. Charging Power: {self.chargingPower.value}kW'
                return returnString

    class Operator(AddressableObject):
        def __init__(
            self,
            localAddress,
            parent,
            fromDict=None,
        ):
            super().__init__(localAddress=localAddress, parent=parent)
            self.name = AddressableAttribute(localAddress='name', parent=self, value=None, valueType=str)
            self.id = AddressableAttribute(localAddress='id', parent=self, value=None, valueType=str)
            self.phoneNumber = AddressableAttribute(localAddress='phoneNumber', parent=self, value=None, valueType=str)

            if fromDict is not None:
                self.update(fromDict)

        def update(self, fromDict):
            LOG.debug('Update operator from dict')

            if 'name' in fromDict:
                self.name.setValueWithCarTime(fromDict['name'], lastUpdateFromCar=None, fromServer=True)
            else:
                self.name.enabled = False

            if 'id' in fromDict:
                self.id.setValueWithCarTime(fromDict['id'], lastUpdateFromCar=None, fromServer=True)
            else:
                self.id.enabled = False

            if 'phoneNumber' in fromDict:
                self.phoneNumber.setValueWithCarTime(fromDict['phoneNumber'], lastUpdateFromCar=None, fromServer=True)
            else:
                self.phoneNumber.enabled = False

            for key, value in {key: value for key, value in fromDict.items()
                               if key not in ['name', 'id', 'phoneNumber']}.items():
                LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value)

        def __str__(self):
            returnString = ''
            if self.name.enabled:
                returnString += self.name.value
            if self.id.enabled:
                returnString += f' (Id: {self.id.value})'
            if self.phoneNumber.enabled and self.phoneNumber.value:
                returnString += f' Phone: {self.phoneNumber.value}'
            return returnString

    def __str__(self):  # noqa: C901
        returnString = ''
        if self.id.enabled:
            returnString += f'ID: {self.id.value}\n'
        if self.name.enabled:
            returnString += f'Name: {self.name.value}\n'
        if self.operator is not None and self.operator.enabled:
            returnString += f'Operator: {self.operator}\n'
        if self.latitude.enabled:
            returnString += f'Latitude: {self.latitude.value}\n'
        if self.longitude.enabled:
            returnString += f'Longitude: {self.longitude.value}\n'
        if self.distance.enabled:
            returnString += f'Distance: {round(self.distance.value)}m\n'
        if self.address is not None and self.address.enabled:
            returnString += f'Address: {self.address}\n'
        if self.chargingPower.enabled:
            returnString += f'Max. Charging Power: {self.chargingPower.value}kW\n'
        returnString += f'Charging Spots: {len(self.chargingSpots)} items\n'
        for spot in self.chargingSpots:
            returnString += ''.join(['\t' + line for line in str(spot).splitlines(True)]) + '\n'
        returnString += f'Authentication: {", ".join([authtype.value.value for authtype in self.authTypes])}\n'
        returnString += 'Options: '
        if self.filteredOut.enabled and self.filteredOut.value:
            returnString += 'filtered out; '
        if self.isFavorite.enabled and self.isFavorite.value:
            returnString += 'favourite; '
        if self.isWeChargePartner.enabled and self.isWeChargePartner.value:
            returnString += 'weCharge partner; '
        returnString += '\n'
        return returnString
UTF-8
Python
false
false
20,694
py
39
charging_station.py
31
0.564415
0.563835
0
423
47.921986
153
definename/py_samples
7,610,682,076,504
939c5b471706a52992d5b722dfca635459a59419
1cdd197816a92587fb8ec4d0c79e6409ae49ddf4
/11concurrency/py_gevent.py
b9ca51c88fa5fbc6c542b82c15ec730a97466a2d
[]
no_license
https://github.com/definename/py_samples
614a31b94462828f8c85279bf1f7343c4569e9fb
42f8af6c447acc517232c200010538ae59f8db94
refs/heads/master
2022-04-27T03:07:01.797979
2022-04-11T17:14:42
2022-04-11T17:14:42
170,145,807
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import gevent from gevent import socket hosts = ["www.google.com"] jobs = [gevent.spawn(gevent.socket.gethostbyname, host) for host in hosts] gevent.joinall(jobs, timeout=5) for job in jobs: print(job.value)
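# joinall() above waits at most 5 seconds, so a job may be unfinished or
# failed; checking before reading .value is safer (sketch):
for host, job in zip(hosts, jobs):
    if job.successful():
        print(host, '->', job.value)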
UTF-8
Python
false
false
213
py
181
py_gevent.py
177
0.746479
0.741784
0
9
22.777778
74
violaferrari/fruitoscopy
10,161,892,671,078
ccb2bf957edcf6170d99484dbb1f5699d831e72c
6c4bd8124d149f4da5f550529d12af84e37d02c2
/frutopy/tables/views.py
32193519b5b45832f0137dafc4eef54fb729ba07
[]
no_license
https://github.com/violaferrari/fruitoscopy
2ff3b8dedaca87d59b9c95a1394cbf68276a28d0
0a1efe2a4343f9865b83cf64ed44c9cc892e3476
refs/heads/master
2021-01-16T21:59:35.735744
2016-07-07T13:39:36
2016-07-07T13:39:36
62,809,187
0
0
null
true
2016-07-07T13:32:58
2016-07-07T13:32:56
2016-07-07T13:32:57
2016-07-07T00:29:11
27,746
0
0
0
JavaScript
null
null
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import View
from django.contrib import messages

from rest_framework import viewsets

from .tasks import process_file
from .models import ML_Model, SP_Model, Sample
from tables.choices import RIPENESS_LABELS
from .serializers import *

import tempfile
import os


class SampleViewSet(viewsets.ModelViewSet):
    """
    Allows samples to be viewed or edited.
    """
    queryset = Sample.objects.all().order_by('-id')  # Descending order
    serializer_class = SampleSerializer


class ML_ModelViewSet(viewsets.ModelViewSet):
    """
    Allows machine learning models to be viewed or edited.
    """
    queryset = ML_Model.objects.all().order_by('-id')
    serializer_class = ML_ModelSerializer


class SP_ModelViewSet(viewsets.ModelViewSet):
    """
    Allows signal processing models to be viewed, edited, or deleted.
    """
    queryset = SP_Model.objects.all().order_by('-id')
    serializer_class = SP_ModelSerializer


class SampleListView(View):
    """
    Allows user to check and modify labels and validate the sample for
    further training.
    """
    template_name = 'samples_list.html'

    def get(self, request):
        samples = Sample.objects.all()
        return render(request, self.template_name, context={'samples': samples})

    def post(self, request):
        samples = Sample.objects.all()
        validated = request.POST.getlist('validation')
        print(validated)
        for s in samples:
            if s.label != RIPENESS_LABELS[str(request.POST[str(s.pk)]).lower()]:
                s.label = RIPENESS_LABELS[str(request.POST[str(s.pk)]).lower()]
                s.label_is_right = True
            elif str(s.pk) in validated:
                s.label_is_right = True
            elif str(s.pk) not in validated and s.label_is_right == True:
                s.label_is_right = False
            s.save()
        messages.success(request, 'Success! The database has been updated successfully.')
        return render(request, self.template_name, context={'samples': samples})


class DownloadModels(View):
    """
    Displays the model download page.
    """
    template_name = "download_models.html"

    def get(self, request):
        samples = Sample.objects.all()
        return render(request, self.template_name, context={'samples': samples})

    def post(self, request):
        # Fixed: `samples` was referenced here without being queried first,
        # which raised a NameError on POST.
        samples = Sample.objects.all()
        return render(request, self.template_name, context={'samples': samples})


def handle_uploaded_file(f):
    """
    Handles uploaded file and triggers its processing.
    """
    fd, file_name = tempfile.mkstemp()
    for chunk in f.chunks():
        os.write(fd, chunk)
    os.close(fd)

    process_file.delay(file_name)


def upload_file(request):
    """
    Handles requests for file upload.
    """
    if request.method == 'POST':
        handle_uploaded_file(request.FILES['file'])
        return HttpResponseRedirect('/success')
    return render(request, 'upload.html')


def success(request):
    """
    Redirects to Success page on successful file upload.
    """
    return render(request, 'success.html')


def home(request):
    """
    Home page.
    """
    return render(request, 'index.html')
UTF-8
Python
false
false
3,611
py
2
views.py
2
0.644143
0.644143
0
123
28.365854
89
bronzels/libpycommon
15,470,472,227,915
ab8d4ed4e5aa7f532853ab34524ab61c098b9c82
c9c321847e52ba95ddc8258dfe03bd20afa9032f
/libpycommon/libpycommonkey/me.py
f4a548ea80364e9dde9d3cc9000947ca918e1d26
[]
no_license
https://github.com/bronzels/libpycommon
6f08d2c55be4d869c785f8f91e3a6031927a7eb9
3228829a8202b862717a4b47261fbc5c841e5385
refs/heads/main
2023-07-17T04:46:08.279240
2021-09-06T10:18:23
2021-09-06T10:18:23
403,574,967
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import key key_path = key.__file__ package_key_abs_path = key_path[0:key_path.find('__init__.py')] package_key_res_path = 'key'
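# Illustration of what these resolve to (paths depend on where the `key`
# package is installed; the values below are placeholders):
# key_path             -> '.../site-packages/key/__init__.py'
# package_key_abs_path -> '.../site-packages/key/'
# package_key_res_path -> 'key'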
UTF-8
Python
false
false
129
py
37
me.py
33
0.658915
0.651163
0
5
24.8
63
jufrisk/BPGame
4,148,938,446,109
7c0883cd9c98d2ec2de0aeeb4318b1a2d2d4fc94
6708b61fd246320c0f39e8fbac182d81a027e973
/games_olio.py
28cee79ea97fcdbb851df25f6ea2e3c01a07d17d
[]
no_license
https://github.com/jufrisk/BPGame
b7276810408cb222c163b06935562f60e82ed7c1
d40b93649efc3b4e31654efce9954a8c7a69be2e
refs/heads/main
2022-12-20T12:20:45.599146
2020-10-12T10:01:15
2020-10-12T10:01:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from bp_db import printplayers
from library import new_player


class playerStats(object):
    def __init__(self, name):
        self.name = name
        self.points = 0
        self.cupsfor = 0
        self.cupsagainst = 0
        self.cupdifference = 0
        self.gamestotal = 0
        self.won = 0
        self.lost = 0

    def updateStats(self, resultofgame):
        if resultofgame > 0:
            self.points = self.points + 3
            self.won = self.won + 1
            self.cupsfor = self.cupsfor + resultofgame
        else:
            self.cupsagainst = self.cupsagainst - resultofgame
            self.lost = self.lost + 1
        self.gamestotal = self.gamestotal + 1
        # Keep the derived value in sync: the original computed it only once
        # in __init__, so it was always zero.
        self.cupdifference = self.cupsfor - self.cupsagainst

    def getGroup(self):
        print(self.name, self.won, self.lost, self.cupdifference, self.points)

    def getRank(self):
        # Computed on demand; the original cached this in __init__ and
        # therefore always returned the stale initial rank.
        return self.cupsfor + self.cupdifference * 10000 + self.points * 100000000


def game(groupplayers, gameplayer1, gameplayer2, resplayer1, resplayer2):
    # The winner records a positive cup difference, the loser the negative one.
    if resplayer1 > resplayer2:
        results = (gameplayer1, resplayer1), (gameplayer2, -1 * resplayer1)
    else:
        results = (gameplayer2, resplayer2), (gameplayer1, -1 * resplayer2)
    for name, cupdiff in results:
        # The original indexed the playerStats *class* here; the stats live in
        # the dict built by setti(), so it has to be passed in.
        groupplayers[name].updateStats(cupdiff)


def sortgroup(playerslist, groupaplayers):
    # Repeatedly pick the best-ranked remaining player; the original
    # duplicated this loop by hand for each position.
    sortedlist = []
    while playerslist:
        bestplyr = max(playerslist, key=lambda player: groupaplayers[player].getRank())
        sortedlist.append(bestplyr)
        playerslist.remove(bestplyr)
    return sortedlist


def groupgames(players):
    groupagames = [
        players[0], players[1],
        players[2], players[3],
        players[0], players[2],
        players[1], players[3],
        players[3], players[0],
        players[1], players[2]
    ]
    return groupagames


def setti(players):
    # Fixed: the original referenced `games.playerStats` without importing a
    # `games` module; the class is defined in this file.
    groupaplayers = {name: playerStats(name=name) for name in players}
    return groupaplayers

#
# 1-2 3-4 1-3 2-4 4-1 2-3
# def main():
#
#
#     player1temp = input("Insert player1 for the game: ")
#     player2temp = input("Insert player2 for the game: ")
#     resultofgame = game(player1temp, player2temp)
#
#     for i in resultofgame:
#         name, cupdiff = i
#         groupaplayers[name].updateStats(cupdiff)
#
#     #update group standings
#     updatedlist = sortgroup(groupA)
#
#     for name in updatedlist:
#         groupaplayers[name].getGroup()
#
# main()
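# groupgames() hard-codes the fixtures for a four-player group; the same
# round-robin schedule can be generated for any group size (sketch):
from itertools import combinations

def all_pairings(players):
    # every player meets every other player exactly once
    return list(combinations(players, 2))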
UTF-8
Python
false
false
3,268
py
13
games_olio.py
9
0.602203
0.571603
0
121
25.008264
87
openweave/openweave-core
8,237,747,275,441
bc0b0a5bc17450617f33f624ac9ea73c12c5624c
edf4f46f7b473ce341ba292f84e3d1760218ebd1
/src/test-apps/happy/test-templates/ServiceAccountManager.py
f63c46b498c6b43575a59323ffa5c00456a079c8
[ "LicenseRef-scancode-proprietary-license", "Apache-2.0" ]
permissive
https://github.com/openweave/openweave-core
7e7e6f6c089e2e8015a8281f74fbdcaf4aca5d2a
e3c8ca3d416a2e1687d6f5b7cec0b7d0bf1e590e
refs/heads/master
2022-11-01T17:21:59.964473
2022-08-10T16:36:19
2022-08-10T16:36:19
101,915,019
263
125
Apache-2.0
false
2022-10-17T18:48:30
2017-08-30T18:22:10
2022-09-17T18:22:20
2022-08-10T16:36:19
54,162
222
111
78
C++
false
false
#!/usr/bin/env python3

# Copyright (c) 2020 Google LLC.
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#
# @file
#   Implements the generate-register-service command, which is used for service provisioning.
#   Note: "openweave-core/src/test-apps/happy/test-templates/generated"
#   needs to be added to PYTHONPATH.

from __future__ import print_function
from __future__ import absolute_import

import future
import json
import grpc
from grpc.framework.interfaces.face.face import ExpirationError
import time

import nestlabs.gateway.v2.gateway_api_pb2 as gateway_api_pb2
import nestlabs.gateway.v2.gateway_api_pb2_grpc as gateway_api_pb2_grpc

from http.client import HTTPSConnection
from future.moves.urllib.parse import unquote

options = {}
options["tier"] = None
options["username"] = None
options["password"] = None


def option():
    return options.copy()


apigw_fmt = 'apigw.{tier}.nestlabs.com'
apigw_siac_fmt = '{siac_name}.unstable.nestlabs.com'


class ServiceClient(object):
    """
    Gets basic account information from Service

    Args:
        tier (str): tier of the service
        username (str): Account email/username
        password (str): Account password
        token (str): Account access token
    """

    def __init__(self, tier, username, password, token):
        self.tier = tier
        self.username = username
        self.password = password
        self.gateway_api_pb2 = gateway_api_pb2
        self.gateway_api_pb2_grpc = gateway_api_pb2_grpc
        auth_token = 'Basic ' + token
        self._auth_metadata = [('authorization', auth_token)]

    def _create_gateway_service_stub(self):
        gateway_api_pb2_grpc = self.gateway_api_pb2_grpc
        if ".unstable" in self.tier:
            siac_name = self.tier.split('.')[0]
            apigw = apigw_siac_fmt.format(siac_name=siac_name)
            channel = grpc.insecure_channel('{}:9953'.format(apigw))
        else:
            apigw = apigw_fmt.format(tier=self.tier)
            port = 443
            channel_credentials = grpc.ssl_channel_credentials(None, None, None)
            channel = grpc.secure_channel('{}:{}'.format(apigw, port), channel_credentials)
        return \
            gateway_api_pb2_grpc.GatewayServiceStub(channel)

    @property
    def account_id(self):
        gateway_api_pb2 = self.gateway_api_pb2
        request = gateway_api_pb2.ObserveRequest()
        request.state_types.append(gateway_api_pb2.StateType.Value('ACCEPTED'))
        request.state_types.append(gateway_api_pb2.StateType.Value('CONFIRMED'))
        stub = self._create_gateway_service_stub()
        for response in stub.Observe(request, 999999, self._auth_metadata):
            for resource_meta in response.resource_metas:
                if 'USER' in resource_meta.resource_id:
                    return resource_meta.resource_id
            if not response.initial_resource_metas_continue:
                break

    @property
    def structure_ids(self):
        gateway_api_pb2 = self.gateway_api_pb2
        request = gateway_api_pb2.ObserveRequest()
        request.state_types.append(gateway_api_pb2.StateType.Value('ACCEPTED'))
        request.state_types.append(gateway_api_pb2.StateType.Value('CONFIRMED'))
        stub = self._create_gateway_service_stub()
        ids = []
        try:
            for response in stub.Observe(request, 15, self._auth_metadata):
                for resource_meta in response.resource_metas:
                    if 'STRUCTURE' in resource_meta.resource_id:
                        ids.append(resource_meta.resource_id.encode('utf-8'))
                if not response.initial_resource_metas_continue:
                    break
        except ExpirationError:
            pass
        finally:
            return ids


class ServiceAccountManager(object):
    """
    weave-register-service [-h --help] [-q --quiet] [-t --tier <NAME>]
                           [-u --username <NAME>] [-p --password <password>]

    --tier option is the service tier

    command to generate options of register-service cmd:
        $ weave-register-service -t integration -u username@nestlabs.com -p yourpassword

    return:
        the options of the register-service command
    """

    def __init__(self, logger_obj, opts=options):
        """
        Initializes the ServiceAccountManager class.

        Args:
            logger_obj (logging.Logger): logger object to be used for logging.
            opts (dict): Dictionary which contains tier, username and password.
        """
        self.tier = opts["tier"]
        self.username = opts["username"]
        self.password = opts["password"]
        self.headers = {'Content-Type': 'application/json'}
        self.logger = logger_obj

    def __pre_check(self):
        if not self.tier:
            self.tier = "integration"
            emsg = "ServiceAccountManager: Using default weave_service_tier %s." % (self.tier)
            self.logger.debug(emsg)

        self.host = 'home.%s.nestlabs.com' % self.tier
        # Siac tiers contain 'unstable' and don't expose a 'home.*' hostname.
        if 'unstable' in self.tier:
            self.host = self.host.replace('home.', '')

        if not self.username:
            emsg = "ServiceAccountManager: please provide username"
            raise ValueError(emsg)
        if not self.password:
            emsg = "ServiceAccountManager: please provide password"
            raise ValueError(emsg)

        self.params = json.dumps(
            {'email': self.username,
             'username': self.username,
             'password': self.password})
        self.access_token, self.user_id = self.get_cz_token_userid()
        self.sessionJSON = self.__get_session_json()
        client = ServiceClient(
            tier=self.tier,
            username=self.username,
            password=self.password,
            token=self.access_token)
        self.structureids = client.structure_ids
        self.accountid = client.account_id
        self.initial_data = self.__get_initial_data_json()

    def get_cz_token_userid(self):
        conn = HTTPSConnection(self.host)
        path = '/api/0.2/create_user_login'
        conn.request('POST', path, self.params, headers=self.headers)
        login_response = conn.getresponse()
        login_response_data = json.load(login_response)
        # delay execution
        time.sleep(1)
        if login_response.status == 201:
            # delay execution
            time.sleep(1)
            self.logger.info("create account for user %s" % self.username)
            token = login_response_data['access_token']
            user_id = login_response_data['user']
        else:
            self.logger.info("get auth info for user %s" % self.username)
            token, user_id = self.__get_account_auth()
        return token, user_id

    def __get_account_auth(self):
        conn = HTTPSConnection(self.host)
        path = '/api/0.1/authenticate_user'
        conn.request('POST', path, self.params, headers=self.headers)
        auth_response = conn.getresponse()
        auth_response_data = json.load(auth_response)
        if auth_response.status == 200:
            self.logger.info("ServiceAccountManager: Authentication successful")
        elif auth_response.status == 400:
            emsg = "ServiceAccountManager: Unauthorized request for user authentication: status=%s error=%s" % (
                auth_response.status, auth_response.reason)
            self.logger.info(emsg)
            raise ValueError(emsg)
        else:
            # Not a 200 or 4xx auth error, server error.
            emsg = "ServiceAccountManager: Service Error on user authentication: HTTPS %s: %s." % (
                auth_response.status, auth_response.reason)
            self.logger.info(emsg)
            raise ValueError(emsg)
        token = auth_response_data['access_token']
        user_id = auth_response_data['user']
        return token, user_id

    def __get_session_json(self):
        conn = HTTPSConnection(self.host)
        path = '/session'
        conn.request('POST', path, self.params, headers=self.headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200 and response.status != 201:
            emsg = 'ServiceAccountManager: Failed with status %d: %s. Password and login correct?' % (
                response.status, response.reason)
            self.logger.info(emsg)
            raise ValueError(emsg)
        return json.loads(data)

    def __get_initial_data_json(self):
        where_id = '00000000-0000-0000-0000-000100000010'
        spoken_where_id = '00000000-0000-0000-0000-000100000010'
        initialDataJSON = {
            'structure_id': self.structureids[0].decode("UTF-8"),
            'where_id': where_id,
            'spoken_where_id': spoken_where_id}
        return initialDataJSON

    def run(self):
        self.logger.debug("[localhost] ServiceAccountManager: Run.")
        self.__pre_check()
        self.cmd = ' --account-id %s --pairing-token %s --service-config %s --init-data \'%s\'' % (
            self.accountid,
            self.sessionJSON['weave']['pairing_token'],
            self.sessionJSON['weave']['service_config'],
            json.dumps(self.initial_data))
        print("Weave Access Token:")
        print(self.sessionJSON['weave']['access_token'])
        print("weave-register-service generated the service registration command:")
        print("register-service %s\n" % self.cmd)
        self.logger.debug("[localhost] ServiceAccountManager: Done.")
        return self.cmd
UTF-8
Python
false
false
10,263
py
2,082
ServiceAccountManager.py
1,761
0.61863
0.604209
0
273
36.593407
201
ClarkTheCoder/miles2km
7,765,300,899,208
9d77a1e0a20e07d171f95666090abfe5c1c61ce2
59e8c00536168a64d017f9c7c82e516a14f5c333
/milesToKM.py
e2bc257ea9c11877da7bd794582da797340acacf
[]
no_license
https://github.com/ClarkTheCoder/miles2km
b906b23f58c32531f03f25fd77a37bf2a8740c37
9a588f51b83d8f86225798617c64d0fa25661d8d
refs/heads/master
2021-05-01T01:51:35.191216
2017-01-23T22:32:04
2017-01-23T22:32:04
79,855,227
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def milesToKm(miles):
    km = miles * 1.60934
    print(km, "km")

m = input("Please enter miles: ")
m = float(m)
milesToKm(m)
UTF-8
Python
false
false
113
py
1
milesToKM.py
1
0.690265
0.637168
0
7
15.285714
31
ScienceStacks/common_python
2,585,570,325,525
4384564790abca63f25d9c13f8681a0d5642bde2
a08bad949da6f1025157037bb395720d49da96b9
/common_python/parallel/example_context_manager.py
5b1d41b365ff87fcb44344fe8d27f54867c28340
[ "MIT" ]
permissive
https://github.com/ScienceStacks/common_python
9c078d290b2c3459f2d1b9ee5d9310afd7b845a3
a57542245f117fe6c835cc9d7ad570b9853b7e6c
refs/heads/master
2022-11-25T04:38:57.316907
2022-11-19T18:49:00
2022-11-19T18:49:00
177,683,814
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import concurrent.futures
import logging
import time

import numpy as np


class MyThreads(object):

    def thread_function(self, args):
        name1, name2 = args
        print(name1, name2)
        name = name1 + "-" + name2
        logging.info("Thread %s: starting", name)
        time.sleep(2)
        logging.info("Thread %s: finishing", name)


if __name__ == "__main__":
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO,
                        datefmt="%H:%M:%S")
    my_threads = MyThreads()
    args = [('a', 'a'), ('b', 'b'), ('c', 'c')]
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=3) as executor:
        executor.map(my_threads.thread_function, args)
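For comparison, a self-contained variant of the same pattern using `executor.submit` plus `as_completed`, which surfaces worker exceptions and lets results arrive out of order; this is a stdlib-only illustration, not part of the original repo.

import concurrent.futures
import logging
import time

def work(name1, name2):
    name = name1 + "-" + name2
    logging.info("Thread %s: starting", name)
    time.sleep(2)
    logging.info("Thread %s: finishing", name)
    return name

if __name__ == "__main__":
    logging.basicConfig(format="%(asctime)s: %(message)s",
                        level=logging.INFO, datefmt="%H:%M:%S")
    pairs = [('a', 'a'), ('b', 'b'), ('c', 'c')]
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
        futures = [executor.submit(work, a, b) for a, b in pairs]
        for future in concurrent.futures.as_completed(futures):
            print(future.result())  # re-raises any exception from the worker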
UTF-8
Python
false
false
691
py
105
example_context_manager.py
85
0.628075
0.616498
0
25
26.64
50
thinker007/freebase-utils
14,482,629,762,734
e8a03672a87f88b08785911b7b57679c66ea5868
59d8268e0db85e3de33859d84b03a75448c4fa6d
/src/nrhp-upload/id-load.py
a50049b78e20a059df3874f52a79502ac0c1858b
[]
no_license
https://github.com/thinker007/freebase-utils
c8ba92d5cb4b8eab5916902e9be1634087c85cb7
f8fc5d0ebf77a3732bf33dd5181def5a20bfaf67
refs/heads/master
2021-01-02T09:15:38.118993
2010-09-15T19:32:18
2010-09-15T19:32:18
32,506,231
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Load missing NRIS ids into Freebase using FreeQ/Triple Loader.
Input is a file of Wikipedia ID / NRIS ref num tuples.  This file is
separately generated using template/infobox data from BFG.
Program creates triples to assign keys for the reference number and
add appropriate types to the existing topics.

Created on Jan 14, 2010

@author: Tom Morris <tfmorris@gmail.com>
@copyright: Copyright 2010 Thomas F. Morris
@license: Eclipse Public License (EPL) v1
'''

import json
import logging
from fileinput import FileInput

from FreebaseSession import FreebaseSession, getSessionFromArgs


def main():
    file = FileInput('id-missing.txt')  # 4 space separated columns: 1 & 2 junk, 3 - NRIS ref#, 4 - FB ID
    session = getSessionFromArgs();
#    status = session.mqlwrite([{"id":"/guid/9202a8c04000641f800000001378d774", "type":{"id":"/common/topic","connect":"insert"}}])
    triples = []
    count = 0
    for line in file:
        fields = line.strip().split(' ')
        id = fields[3]
        refno = fields[2]
        triple = {'s':fields[3], 'p': 'key','o':'/base/usnris/item/'+fields[2]}
        triples.append(json.JSONEncoder().encode(triple))
        triple.update({'p':'type','o':'/base/usnris/nris_listing'})
        triples.append(json.JSONEncoder().encode(triple))
        triple.update({'p':'type','o':'/base/usnris/topic'})
        triples.append(json.JSONEncoder().encode(triple))
        triple.update({'p':'type','o':'/protected_sites/listed_site'})
        triples.append(json.JSONEncoder().encode(triple))

    payload = '\n'.join(triples)
#    payload=json.JSONEncoder().encode({'s':'/guid/9202a8c04000641f800000001378d774','p':'alias','o':'Le remplisseur de Thomas','lang':'/lang/fr'})
#    print payload
    session.login()  # login right before submission to close window where server reboots can affect us
    resp, body = session.tripleSubmit(triples=payload, job_comment='Trying it again', data_comment="%d topics from U.S. Register of Historic Places" % len(triples))
    print resp, body

if __name__ == '__main__':
    main()
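The per-row triple construction above is easy to unit-test if pulled into a pure function. A sketch follows, using `json.dumps` (equivalent to `json.JSONEncoder().encode`); the helper name and the guid/refnum values are hypothetical placeholders, not part of the original module.

import json

def build_triples(fb_id, refno):
    """Return the four JSON-encoded triples emitted for one input row."""
    triples = []
    triple = {'s': fb_id, 'p': 'key', 'o': '/base/usnris/item/' + refno}
    triples.append(json.dumps(triple))
    for t in ('/base/usnris/nris_listing', '/base/usnris/topic',
              '/protected_sites/listed_site'):
        triple.update({'p': 'type', 'o': t})
        triples.append(json.dumps(triple))
    return triples

# Example: the four triples emitted for one (placeholder) Wikipedia/NRIS row.
print('\n'.join(build_triples('/guid/9202a8c04000641f800000001378d774', '66000001')))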
UTF-8
Python
false
false
2,057
py
43
id-load.py
38
0.682547
0.645114
0
46
43.73913
161
Hog-Fillet/hammer
6,158,983,113,515
5f81603fdc49c0c020428d1efa57252446d01ad3
5b5b97dcf51b8eba5682488cb1cba0a023919e68
/python_scripts/mysql.py
d4c88005d9bdb8acddddc59bb8100f9f5d4246ae
[ "MIT" ]
permissive
https://github.com/Hog-Fillet/hammer
4de60a0c7a77597d03492e4027b79c743d4308a7
c42b48e764d667af4ed8b82ee094c9c03c0fb832
refs/heads/master
2023-05-04T14:36:24.619359
2021-05-29T20:36:35
2021-05-29T20:36:35
372,063,953
0
0
MIT
true
2021-05-29T20:36:59
2021-05-29T20:27:31
2021-05-29T20:36:37
2021-05-29T20:36:58
2,451
0
0
1
Ruby
false
false
#!/usr/bin/python3
import pymysql

'''
This script selects the email address and users from users MySQL table
'''

db = pymysql.Connect("localhost", "<DB_USER>", "<DB_PASSWORD>", "<DB_NAME>")

search = []

try:
    with db.cursor() as cursor:
        ### Using a while loop
        sql = "SELECT `users`.email, `users`.name FROM `users`"
        cursor.execute(sql)
        row = cursor.fetchone()

        while row is not None:
            print(row)
            row = cursor.fetchone()

        ### Using the cursor as iterator
        cursor.execute(sql)
        for row in cursor:
            print(row)
finally:
    db.close()
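If the query ever takes user input, pymysql supports parameterized execution, where the driver escapes values for you. A sketch with the same placeholder credentials; the table/column names and the sample argument are assumptions.

#!/usr/bin/python3
# Hedged sketch: parameterized SELECT with pymysql (placeholder credentials).
import pymysql

db = pymysql.connect(host="localhost", user="<DB_USER>",
                     password="<DB_PASSWORD>", database="<DB_NAME>")
try:
    with db.cursor() as cursor:
        # %s placeholders are escaped by the driver, avoiding SQL injection.
        cursor.execute("SELECT email, name FROM users WHERE name = %s", ("alice",))
        for row in cursor:
            print(row)
finally:
    db.close()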
UTF-8
Python
false
false
631
py
39
mysql.py
13
0.576862
0.575277
0
25
24.24
77
unbenchme/some-words-go-here
14,972,256,042,264
19dce772a8b5de18b5b4e5790633848bae032f57
63b76daa02b495f653a9c5fb1b11f70773c5d4cc
/requests/models.py
ad03f720feaf52a37d555fa554ce75aa92e161bd
[ "MIT" ]
permissive
https://github.com/unbenchme/some-words-go-here
e71ec171a48c03c1ccb1fa2c031519a6ba9745b1
f9fc089e9211895839319cd0136e66b5da085743
refs/heads/master
2020-03-30T11:29:36.717786
2015-04-08T13:15:13
2015-04-08T13:15:13
33,520,721
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User

# Create your models here.

#Category tracks broad skill-type categories (i.e. Python, Java, Tax, Excel, etc.)
class Category(models.Model):
    username = models.ForeignKey(User, related_name='c_user')  #Tracks who created what category
    name = models.CharField(max_length=100, default="A Category")
    timestamp = models.DateTimeField(default=timezone.now())  #Timestamp of when category created

    def __unicode__(self):
        return self.name


#Requests is a request for help by a user
class Request(models.Model):
    created_by = models.ForeignKey(User, related_name='r_user')  #Username from contrib.auth
    name = models.CharField(max_length=100, default="A Project")
    time_in_hours = models.DecimalField(max_digits=4, decimal_places=2)  #Time request will take in hours
    timestamp = models.DateTimeField(default=timezone.now())  #Timestamp of request
    category_name = models.ForeignKey(Category)  #Category of request from Category Class
    number_of_people = models.DecimalField(max_digits=2, decimal_places=0)  #Number of people requested

    def __unicode__(self):
        return self.name


#Available identifies whether a user is available or not
class Available(models.Model):
    username = models.OneToOneField(User, primary_key=True)  #Username from contrib.auth
    is_available = models.BooleanField(default=None)

#    def __unicode__(self):
#        return self.username, self.is_available


# RequestAccept identifies who is currently accepting what request
class RequestAccept(models.Model):
    username = models.ForeignKey(User, related_name="ra_user")  #Username from contrib.auth
    request_id = models.ForeignKey(Request)  #Request from request class

    def __unicode__(self):
        return self.username, self.request_id
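A hedged sketch of how these models relate when queried, e.g. from `python manage.py shell`; `some_user` is a placeholder User instance and the field semantics are taken from the comments above.

# Hedged ORM sketch (placeholder user; assumes at least one User exists).
from django.contrib.auth.models import User

some_user = User.objects.first()

# All help requests a user created, newest first:
my_requests = Request.objects.filter(created_by=some_user).order_by('-timestamp')

# Who is currently helping on a given request:
helpers = RequestAccept.objects.filter(request_id=my_requests.first())

# Toggle availability (one-to-one with the auth user):
avail, _ = Available.objects.get_or_create(username=some_user,
                                           defaults={'is_available': True})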
UTF-8
Python
false
false
2,217
py
13
models.py
11
0.638701
0.63419
0
39
55.846154
150
xubigshu/python_code
188,978,592,338
61a1c97b21294e7c87bba455fe04ef217539812c
9a3edffa407c63e64e12e216eed1c02f5d7c3a5a
/cookie用法/post-cookie.py
bdd150f10201e7d7ebd1067c28900da1ff9a8220
[]
no_license
https://github.com/xubigshu/python_code
9abbfa1a198d3c15128c0350fa66e1ba0c2a0264
db59f631635ca32df251d7fe7d51961fa0f35d3e
refs/heads/master
2016-09-14T15:31:11.731714
2016-08-08T02:18:05
2016-08-08T02:18:05
53,396,693
3
2
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding:utf-8
import urllib, urllib2, cookielib

cj = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

login_path = 'http://10.0.0.149:8080/login'
data = {"name": "xds", "passwd": "xxxxxxxx"}
post_data = urllib.urlencode(data)

request = urllib2.Request(login_path, post_data)
html = opener.open(request).read()

if cj:
    print cj
    cj.save('cookiefile.txt')
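The script above is Python 2 (`urllib2`/`cookielib`). A Python 3 equivalent using only the standard library would look roughly like this; the endpoint and credentials are the same placeholders as in the original.

# coding: utf-8
# Python 3 sketch of the same flow with http.cookiejar / urllib.request.
import urllib.parse
import urllib.request
import http.cookiejar

cj = http.cookiejar.LWPCookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))

login_path = 'http://10.0.0.149:8080/login'
data = {"name": "xds", "passwd": "xxxxxxxx"}
post_data = urllib.parse.urlencode(data).encode('utf-8')  # bytes in Python 3

request = urllib.request.Request(login_path, post_data)
html = opener.open(request).read()

if cj:
    print(cj)
    cj.save('cookiefile.txt')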
UTF-8
Python
false
false
388
py
261
post-cookie.py
177
0.739691
0.698454
0
14
26.785714
62
teja0009/Projects
19,653,770,372,925
8eaef24e36e2460da11c01b075935368b5cc921c
d6b21db31c312ecb0da1b52b955eac1c93c373a9
/Python_Projects/walmart/Script/scraper.py
e46b397df58a73ddf09aea37da01591a4917989e
[]
no_license
https://github.com/teja0009/Projects
84b366a0d0cb17245422c6e2aad5e65a5f7403ac
70a437a164cef33e42b65162f8b8c3cfaeda008b
refs/heads/master
2023-03-16T10:10:10.529062
2020-03-08T06:22:43
2020-03-08T06:22:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import openpyxl
from openpyxl import Workbook
from openpyxl import load_workbook
import os
import time
import string


class Scraper:
    def __init__(self, driver_path, site_url, readfile_path, writefile_path):
        self.siteUrl = site_url
        self.driverPath = driver_path
        self.readFilePath = readfile_path
        self.writeFilePath = writefile_path
        self.driver = webdriver.Chrome(executable_path=self.driverPath,
                                       options=webdriver.ChromeOptions())

    @staticmethod
    def get_states(self):
        path = "../ReadFile/Stores.xlsx"
        wb = load_workbook(path)
        sheet = wb.active
        states = []
        state = ''
        for row in range(2, sheet.max_row):
            if state != sheet.cell(row=row, column=7).value:
                state = sheet.cell(row=row, column=7).value
                states.append(state)
        return states

    def get_aisle(self, path):
        try:
            self.driver.get(path)
        except:
            self.driver.close()
        # try:
        #     WebDriverWait(self.driver, 3).until(
        #         EC.presence_of_element_located((By.CLASS_NAME, 'nearby-store-count')))
        # except TimeoutException:
        #     self.driver.quit()
        #     continue
        try:
            search_tab = WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'results-info')))
        except NoSuchElementException:
            search_tab = None
        except TimeoutException:
            search_tab = None
        if search_tab is None:
            return None
        soup = BeautifulSoup(self.driver.page_source, 'html.parser')
        div_tags = soup.find('div', attrs={'class': 'search-tab container'}).find_all('div', attrs={'class': 'tile-in-store-info'})
        for div_tag in div_tags:
            str = div_tag.get_text().split('|')
            if len(str) == 2:
                print(str)
                aisle_txt = str[0].split(' ')
                aisle = aisle_txt[1]
                return aisle

    def run(self):
        states = self.get_states(self)
        s_states = ["AK", "ND", "HI", "VT", "RI", "DE"]
        for state in states:
            for s_state in s_states:
                if state == s_state:
                    states.remove(state)
        print(states)
        i = 0
        stores_file = self.readFilePath + '/Stores.xlsx'
        store_wb = load_workbook(stores_file)
        store_sheet = store_wb.active
        k = 1
        for i in range(5, len(states)):
            for store_row in store_sheet:
                if store_row[6].value == states[i]:
                    if k < 13:
                        k += 1
                        continue
                    # make a directory for each state
                    dir_name = "../Result/%s" % (states[i])
                    try:
                        # open(os.path.join(os.pardir, "filename"), "w")
                        os.mkdir(dir_name)
                        print("Directory ", dir_name, " Created ")
                    except FileExistsError:
                        print("Directory ", dir_name, " already exists")
                    store_search_url = self.siteUrl + '/store/' + str(store_row[0].value) + '/' + str(
                        store_row[1].value) + '-ar'
                    items_wb = load_workbook("%s/Items.xlsx" % self.readFilePath)
                    items_sheet = items_wb.active
                    # make a new xlsx file for each store
                    print("File %s-%s.xlsx created" % (store_row[1].value, store_row[0].value))
                    save_path = "%s/%s-%s.xlsx" % (dir_name, store_row[1].value, store_row[0].value)
                    new_wb = Workbook()
                    new_sheet = new_wb.active
                    print(items_sheet.max_row)
                    for item_row in range(1, items_sheet.max_row + 1):
                        item = items_sheet.cell(row=item_row, column=1).value
                        item_search_url = store_search_url + '/search?query=' + item
                        new_sheet.cell(row=item_row, column=1).value = item
                        new_sheet.cell(row=item_row, column=2).value = self.get_aisle(item_search_url)
                        new_wb.save(save_path)
                    # for item_row in items_sheet:
                    #     for cell in item_row:
                    #         item = cell.value
                    #         item_search_url = store_search_url + '/search?query=' + item
                    #         print(item_search_url)
                    #         new_sheet[cell.coordinate].value = cell.value
                    #         coord = 'B%s' % cell.coordinate[1:]
                    #         new_sheet[coord] = self.get_aisle(item_search_url)
                    #     new_wb.save(save_path)
                    new_wb.save(save_path)
        self.driver.close()


if __name__ == '__main__':
    print("================== starting ==================")
    driverPath = "../driver/chromedriver.exe"
    readFilePath = "../ReadFile"
    writeFilePath = "../result"
    siteUrl = "https://www.walmart.com"
    Scraper = Scraper(driverPath, siteUrl, readFilePath, writeFilePath)
    Scraper.run()
    # https://www.walmart.com/
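The spreadsheet bookkeeping above leans on openpyxl. A small self-contained sketch of the cell-access pattern used by `get_states` follows; the workbook contents are made up so it runs standalone, and note that the original loop stops at `range(2, sheet.max_row)`, which skips the last data row, while this sketch uses the inclusive bound.

# Minimal openpyxl sketch mirroring get_states(): read column 7 of a Stores sheet.
from openpyxl import Workbook, load_workbook

# Build a tiny workbook so the example is runnable without the real file.
wb = Workbook()
ws = wb.active
for r, state in enumerate(["header", "TX", "TX", "CA"], start=1):
    ws.cell(row=r, column=7).value = state
wb.save("Stores_demo.xlsx")

sheet = load_workbook("Stores_demo.xlsx").active
states, last = [], None
for row in range(2, sheet.max_row + 1):   # inclusive bound, unlike the original
    value = sheet.cell(row=row, column=7).value
    if value != last:
        last = value
        states.append(value)
print(states)  # ['TX', 'CA']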
UTF-8
Python
false
false
5,675
py
1,401
scraper.py
787
0.52
0.51489
0
142
38.957746
131
sbook/jingo-minify
4,501,125,774,000
3b2fff605598050aefe11dfd6bbae6ae3531a740
15c7f69c53ec8b46806e8126c4f7985a4de4751a
/jingo_minify/management/commands/compress_assets.py
a5afc0890dcb5ad4b22ba856df44bb6f0530c200
[ "BSD-3-Clause" ]
permissive
https://github.com/sbook/jingo-minify
c5d5c2c6d51ef0f64a2041145d9085dc0e249b40
923ae25f5739d1cb8d2d13f635e51377040cab20
refs/heads/master
2017-04-28T21:10:26.412003
2011-04-14T10:06:51
2011-04-14T10:06:51
1,510,646
1
2
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
import hashlib
import yaml
import re
import shutil
from subprocess import call, PIPE

from django.conf import settings
from django.core.management.base import BaseCommand

path = lambda *a: os.path.join(settings.MEDIA_ROOT, *a)

CSS_ASSET_PATTERN = re.compile('(?P<url>url(\([\'"]?(?P<filename>[^)]+\.[a-z]{3,4})(?P<fragment>#\w+)?[\'"]?\)))')


class Command(BaseCommand):  #pragma: no cover
    help = ("Compresses css and js assets defined in settings.MINIFY_BUNDLES")

    requires_model_validation = False

    def handle(self, **options):
        jar_path = (os.path.dirname(__file__), '..', '..', 'bin',
                    'yuicompressor-2.4.4.jar')
        path_to_jar = os.path.realpath(os.path.join(*jar_path))

        v = ''
        if 'verbosity' in options and options['verbosity'] == '2':
            v = '-v'

        bundle_versions = {}

        for ftype, bundle in settings.MINIFY_BUNDLES.iteritems():
            # Create the bundle file type dictonary
            if ftype not in bundle_versions:
                bundle_versions[ftype] = {}
            for name, files in bundle.iteritems():
                namespace = getattr(settings, 'MINIFY_NAMESPACE', '')
                concatted_file_name = '%s%s-all.%s' % (namespace, name, ftype,)
                compressed_file_name = '%s%s-min.%s' % (namespace, name, ftype,)
                concatted_file = path(ftype, concatted_file_name)
                compressed_file = path(ftype, compressed_file_name)
                real_files = [path(f.lstrip('/')) for f in files]

                if real_files:
                    dir_name = os.path.dirname(compressed_file)
                    if not os.path.exists(dir_name):
                        os.makedirs(dir_name)

                    # Concats the files.
                    call("cat %s > %s" % (' '.join(real_files), concatted_file),
                         shell=True)

                    # Rewrite image paths in css
                    if ftype == 'css':
                        self.rewrite_asset_paths_in_css(concatted_file)

                    # Compresses the concatenation.
                    call("%s -jar %s %s %s -o %s" % (settings.JAVA_BIN, path_to_jar, v,
                                                     concatted_file, compressed_file),
                         shell=True, stdout=PIPE)

                    file_hash = self.file_hash(compressed_file)
                    bundle_versions[ftype][name] = {
                        'hash': self.file_hash(compressed_file),
                        'concatted': concatted_file_name,
                        'compressed': compressed_file_name,}

        # Write settings to file
        settings_yaml = open(settings.MINIFY_YAML_FILE, "w")
        yaml.dump(bundle_versions, settings_yaml)
        settings_yaml.close()

    def rewrite_asset_paths_in_css(self, filename):
        tmp = os.tmpfile()
        rel_filename = os.path.join(settings.MEDIA_ROOT, filename)
        css = open(rel_filename, mode='r')
        self.asset_hashs = {}
        for line in css:
            matches = []
            for match in re.finditer(CSS_ASSET_PATTERN, line):
                try:
                    grp = match.groupdict()
                    absolute = grp['filename'].startswith('/')
                    if absolute:
                        asset_path = os.path.join(settings.MEDIA_ROOT, '.' + grp['filename'])
                    else:
                        asset_path = os.path.join(os.path.dirname(rel_filename), grp['filename'])
                    asset = os.path.relpath(asset_path, settings.MEDIA_ROOT)
                    asset_hash = self.get_asset_hash(asset)
                    asset = grp['filename'].rsplit('.', 1)
                    asset[0] += '__%s' % asset_hash
                    asset = '.'.join(asset)
                    asset_version = 'url(%s)' % asset
                    matches.append((grp['url'], asset_version))
                except KeyError:
                    print "Failed to find %s in version map. Is it an absolute path?" % asset
                    raise SystemExit(1)
            for old, new in matches:
                line = line.replace(old, new)
            tmp.write(line)
        tmp.flush()
        tmp.seek(0)
        css.close()
        css = open(rel_filename, mode='wb')
        shutil.copyfileobj(tmp, css)

    def get_asset_hash(self, asset):
        asset_hash = self.asset_hashs.get(asset, None)
        if not asset_hash:
            try:
                asset_hash = self.file_hash(asset)
                self.asset_hashs.update({asset: asset_hash})
            except:
                print 'Asset "%s" referenced in css and not found.' % asset
                asset_hash = ''
        return asset_hash

    def file_hash(self, filename):
        f = open(filename, mode='rb')
        try:
            return hashlib.md5(f.read()).hexdigest()[:8]
        finally:
            f.close()
UTF-8
Python
false
false
5,349
py
7
compress_assets.py
4
0.472238
0.469994
0
141
36.943262
114
zaferyumruk/zyrad_asl
14,920,716,432,356
52db127c9d2496f00fe76c964ab3c07f93a1ab17
d498df6a90efff21faa96a606c63e6bb6e888118
/zyradlib/utilities.py
3c7537f9d65d4999b00e2d2229e6c83ae2182d86
[]
no_license
https://github.com/zaferyumruk/zyrad_asl
3172043bcb42972eba42f542c270eb4b0a237a93
06f49d82f256c9676184958553751bc91ed0fe5a
refs/heads/master
2017-10-07T17:21:27.865976
2017-03-15T23:12:01
2017-03-15T23:12:01
81,299,879
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from matplotlib import pyplot as plt
from collections import Iterable as It


def pickfromrange(range, Data):
    outData = []
    if len(Data[0]) > 1:
        for data in Data:
            outData.append(data[range])
    else:
        outData.append(Data[range])
    return outData


def peekData(x, y, ylegend=[''], title=''):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    if isinstance(y[0], It):
        ylegend_missing = False
        if len(ylegend) is not len(y):
            ylegend_missing = True
            ylegend = []
        for i in range(len(y)):
            if ylegend_missing:
                ylegend.append('y' + str(i))
            plt.plot(x, y[i], label=ylegend[i])
        plt.grid()
        plt.legend()
    else:
        plt.plot(x, y)
        plt.grid()
    plt.title(title)
    plt.draw()
    plt.show()


def plotData(x, y, info='Train'):
    time = range(0, x.shape[0])
    for datatype in range(x.shape[1]):
        peekData(time, x[:, datatype], title=info + 'Input' + str(datatype))  # trq
    for datatype in range(y.shape[1]):
        peekData(time, y[:, datatype], title=info + 'Output' + str(datatype))  # trq


def compareData(x, y, info='Train'):
    time = range(0, x.shape[0])
    for datatype in range(x.shape[1]):
        peekData(time, [x[:, datatype], y[:, datatype]],
                 title=info + 'Output' + str(datatype),
                 ylegend=['input', 'output'])  # trq
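A hedged usage sketch for the helpers above with synthetic data (numpy only, plus a display or an Agg backend for matplotlib); the shapes mirror the (samples x channels) arrays the functions expect.

# Usage sketch with synthetic data; not part of the original module.
import numpy as np

t = np.linspace(0, 1, 100)
x = np.column_stack([np.sin(2 * np.pi * t), np.cos(2 * np.pi * t)])  # 100x2 "inputs"
y = x + 0.1 * np.random.randn(*x.shape)                              # noisy "outputs"

plotData(x, y, info='Demo')      # one figure per input/output column
compareData(x, y, info='Demo')   # overlays input vs. output per column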
UTF-8
Python
false
false
1,425
py
20
utilities.py
20
0.550175
0.541053
0
50
27.5
95
mateusrodc/bowling_kata
6,837,587,975,477
e0f497746e82f27e925173051ac19ef5af274536
4ea29df175697b87378738e367cd43bd0d96c3fc
/teste.py
25f124c4798370dd37ea52ccb2cc7537d9083eb4
[]
no_license
https://github.com/mateusrodc/bowling_kata
c6261cab1c70eee6143deea423618a857457a25a
4e449083aab606fe7618d0d1e6f7f0c2e770d830
refs/heads/master
2021-02-10T02:48:24.053322
2020-03-02T14:34:04
2020-03-02T14:34:04
244,346,249
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import bowling_kata

nome = input("Digite o nome do jogador:")
while nome != 'sair':
    jogador1 = bowling_kata.CalculadoraPontuacaoBoliche(nome)
    jogador1.jogar(0)
    nome = input("Digite o nome do jogador ou sair para finalizar:")
UTF-8
Python
false
false
233
py
2
teste.py
2
0.733906
0.72103
0
6
37.833333
66
ZoraOswald/scRNAseq
13,589,276,556,342
ea57578cc31459de431c9a9896ce12db2a391606
8f7f212f4cb80616e34f084591ebb37f012919d6
/src/network/network_dim_reduction.py
baf29e8b3d8a9023a5fd2d93f62a533fcca32c20
[]
no_license
https://github.com/ZoraOswald/scRNAseq
d3843a63f6589df81a1686b1fbc1a3dd926034b7
03e52ed70334bb80945b63b9b57d47d9700ffe45
refs/heads/master
2020-04-11T08:02:20.876077
2018-12-13T11:31:57
2018-12-13T11:31:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sun Dec  9 00:44:52 2018

@author: silus
"""
import numpy as np
import networkx as nx
import pandas as pd
import pygsp as pg
from helpers.gsp_helpers import graph_sampling, graph_filtering
from helpers.data_import import train_data_in_network
from scipy import sparse
import matplotlib.pyplot as plt

# Two dimensionality reduction techniques based on GSP:
# 1. Graph sampling based on vertices where signal energy is the most concentrated
# 2. Graph frequency sampling: Low/high pass filtering

adjacency_largest_component = sparse.load_npz('adjacency_largest_component.npz')
n_nodes = adjacency_largest_component.shape[0]

node_features = train_data_in_network();
node_features = np.unique(node_features, axis=0)

# read_csv preserves ordering of features as in the csv (increasing feature idx).
# Need to translate this into node order
node_order = np.argsort(node_features[:, 1])
nodes_ordered = node_features[node_order, 0]
train_data = pd.read_csv('../../data/train_data.csv', usecols=node_features[:, 1])

# Map train data onto a graph signal (One value per node)
graph_signals = np.zeros((train_data.shape[0], n_nodes))
graph_signals[:, nodes_ordered] = train_data.values

# Create PyGSP graph
#graph = pg.graphs.Graph(adjacency_largest_component)

# Compute Fourier basis
#graph.compute_fourier_basis(recompute=False)

# Create signal

#### TODO ######
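One possible completion of the commented-out PyGSP steps, hedged: this follows the documented pygsp API (`Graph`, `compute_fourier_basis`, `gft`) and assumes `adjacency_largest_component` and `graph_signals` as defined above; it is a sketch of the TODO, not the authors' code.

# Sketch of the TODO section, assuming the variables defined above.
graph = pg.graphs.Graph(adjacency_largest_component)
graph.compute_fourier_basis()        # eigendecomposition of the graph Laplacian

# Graph Fourier transform of the first training signal (one value per node)
signal = graph_signals[0]
signal_hat = graph.gft(signal)

# Energy per graph frequency; low indices correspond to smooth components
energy = np.abs(signal_hat) ** 2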
UTF-8
Python
false
false
1,438
py
14
network_dim_reduction.py
8
0.732267
0.718359
0
40
33.8
82
ThierryCols/hug-demo
197,568,542,678
f226ee9ad7f77322b597bd34c24c24e402c4b7b3
6f8c7a02175a832fc72ff28e6b71d0f5c7be0e34
/tests.py
a6ae16a782e62b024303043bfa05c4d8fcd0264e
[]
no_license
https://github.com/ThierryCols/hug-demo
8f7b241b3beeca8c1cf60559e0335f2e41ae099c
4ea8df72253cde70982b9659e3eddfea391db1e6
refs/heads/master
2020-09-17T18:45:13.519490
2016-09-16T06:40:44
2017-04-01T14:19:42
67,332,970
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from demo_data import *
from main import *


def test_compute_scores():
    expected_result = {1: 6, 2: 0, 3: 1}
    assert compute_scores() == expected_result


def test_fetch_next_games():
    assert len(fetch_next_games()) == 2


def test_add_prono():
    assert len(pronos) == 5
    add_prono({'id': 6, 'gameId': 3, 'userId': 3,
               'teamId1': 2, 'teamId2': 0, 'winner': 1})
    assert len(pronos) == 6
    new_expected_result = {1: 6, 2: 0, 3: 2}
    assert compute_scores() == new_expected_result


def test_fetch_user_pronos():
    assert len(fetch_user_pronos(1)) == 2
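These tests mutate the shared `pronos` list, so their outcome depends on execution order. A hedged isolation pattern follows; it assumes pytest is the runner and that `pronos` is defined in `demo_data` (the star imports above leave its home module ambiguous), so adjust the module name if it actually lives in `main`.

# Hypothetical isolation fixture; module name `demo_data` is an assumption.
import copy
import pytest
import demo_data

@pytest.fixture(autouse=True)
def restore_pronos():
    saved = copy.deepcopy(demo_data.pronos)
    yield
    demo_data.pronos[:] = saved  # restore in place so other imports see it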
UTF-8
Python
false
false
597
py
4
tests.py
3
0.589615
0.547739
0
24
23.875
63
a1573595/Neural_Emotion_Intensity_Prediction
5,248,450,080,532
a9d97c953503d76fe9e67015fd652faaf02be524
ab851af2448be6e1307ba5944ab6136b35954c6d
/intermediate_files/deepmoji/encode_texts.py
c446b8a06c4375cb7eb6db5a614845b062b584b3
[]
no_license
https://github.com/a1573595/Neural_Emotion_Intensity_Prediction
351589d51015a9fe548c6c94ac7640e40adb3dc4
28425e9e87c6b7eabef6609df0627f4b131e715d
refs/heads/master
2020-07-14T04:09:54.013899
2019-08-29T20:11:50
2019-08-29T20:11:50
205,234,703
0
0
null
true
2019-08-29T19:18:35
2019-08-29T19:18:34
2019-08-14T01:37:26
2018-10-12T17:22:29
70
0
0
0
null
false
false
# -*- coding: utf-8 -*-
"""
Use DeepMoji to encode texts into emotional feature vectors.
"""
from __future__ import print_function, division
import example_helper
import json
import csv
import numpy as np
from deepmoji.sentence_tokenizer import SentenceTokenizer
from deepmoji.model_def import deepmoji_feature_encoding
from deepmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH
import pandas as pd
import sys

TEST_SENTENCES = list(pd.read_csv(sys.argv[1], delimiter='\t', header=None, encoding='utf-8')[1])
# change path to cover all 4 emotions for all 3 sets

maxlen = 30
batch_size = 32

print('Tokenizing using dictionary from {}'.format(VOCAB_PATH))
with open(VOCAB_PATH, 'r') as f:
    vocabulary = json.load(f)
st = SentenceTokenizer(vocabulary, maxlen)
tokenized, _, _ = st.tokenize_sentences(TEST_SENTENCES)

print('Loading model from {}.'.format(PRETRAINED_PATH))
model = deepmoji_feature_encoding(maxlen, PRETRAINED_PATH)
model.summary()

print('Encoding texts..')
encoding = model.predict(tokenized)

#print('First 5 dimensions for sentence: {}'.format(TEST_SENTENCES[0]))
print(encoding.shape)
np.save(sys.argv[2], encoding)
print('SAVED')

# Now you could visualize the encodings to see differences,
# run a logistic regression classifier on top,
# or basically anything you'd like to do.
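Following the script's closing comment, a hedged sketch of a classifier on top of the saved encodings; the file names and the labels array are placeholders, not files produced by the original pipeline.

# Hedged follow-up: logistic regression over the saved DeepMoji encodings.
# 'encodings.npy' / 'labels.npy' are placeholder file names.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X = np.load('encodings.npy')
y = np.load('labels.npy')            # one label per encoded sentence

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
print('accuracy:', clf.score(X_te, y_te))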
UTF-8
Python
false
false
1,310
py
36
encode_texts.py
5
0.755725
0.745802
0
42
30.190476
147
tomarint/procon
17,093,969,855,547
0ede3eaa3329d430e6f7aaf1bd0e8c5029793b6a
184ea0db92d3b2516be6a8df76db38a68d32b62d
/codeforces/cr555_3/c.py
d525c06417d1760fa6fed997a2c8ea9879828773
[]
no_license
https://github.com/tomarint/procon
bc8b005bae24a122c82045f5c66071753e6b1af9
c99bffd83d13278fa563489591a361ee74679f27
refs/heads/master
2020-04-20T16:53:53.542580
2020-04-19T04:15:55
2020-04-19T04:15:55
168,972,492
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import collections


def solve():
    N = int(sys.stdin.readline().rstrip())
    A = list(map(int, sys.stdin.readline().rstrip().split()))
    q = collections.deque(A)
    ans = []
    old = 0
    for i in range(N - 1):
        l = q.popleft()
        r = q.pop()
        if l < r:
            if old < l:
                old = l
                ans.append('L')
                q.append(r)
            elif old < r:
                old = r
                ans.append('R')
                q.appendleft(l)
            else:
                break
        else:
            if old < r:
                old = r
                ans.append('R')
                q.appendleft(l)
            elif old < l:
                old = l
                ans.append('L')
                q.append(r)
            else:
                break
    else:
        r = q.pop()
        if old < r:
            old = r
            ans.append('R')
    print(len(ans))
    print(''.join(ans))


solve()
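Since `solve()` consumes `sys.stdin`, a quick local check can swap stdin for an in-memory buffer. The sample case below is made up; tracing the greedy by hand, it should print 4 and LRRR (take 1 from the left, then 2, 3, 4 from the right).

# Local check: feed a sample case through stdin (made-up input).
import io
import sys

sys.stdin = io.StringIO("5\n1 2 4 3 2\n")
solve()   # expected output for this case: "4" then "LRRR"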
UTF-8
Python
false
false
978
py
729
c.py
567
0.365031
0.362986
0
43
21.744186
60
chrisstianandres/almacen_yamaha
8,400,956,070,152
f173882d47b64e33fead1ec671e56d2947d65e97
bb3ae8193289e98e01bea265646f7c77f20558af
/app/views.py
c636e63d7c2c1bf577e9d5dfaf93d731562ada3e
[]
no_license
https://github.com/chrisstianandres/almacen_yamaha
4edbbc827bba7143f466d11c066e522cb8357b25
711096cd958e92cb6ec9423730a92120ac614337
refs/heads/master
2023-05-13T14:15:30.184461
2021-06-07T15:02:46
2021-06-07T15:02:46
370,217,067
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json

from django.contrib.auth import authenticate, login
from django.http import HttpResponse
from django.shortcuts import render

# Create your views here.
from django.views.decorators.csrf import csrf_exempt

from app.producto.models import producto
from sistema_yamaha.settings import MEDIA_URL


def menu(request):
    logo = '{}{}'.format(MEDIA_URL, 'logo.png')
    data = {
        'title': 'Pagina de Inicio',
        'entidad': 'Menu Principal',
        'logo': logo,
        'productos': producto.objects.all()[0:7],
        'productos_full': producto.objects.all()
    }
    return render(request, 'index.html', data)
    # return render(request, 'bases/base.html', data)


@csrf_exempt
def connect(request):
    data = {}
    if request.method == 'POST' or None:
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            data['resp'] = True
        else:
            data['error'] = 'Error en las credenciales de acceso'
    else:
        data['error'] = 'Metodo Request no es Valido.'
    return HttpResponse(json.dumps(data), content_type="application/json")
UTF-8
Python
false
false
1,227
py
142
views.py
114
0.660147
0.658517
0
39
30.461538
90
GregoryElliott/TGMA_NLP_Project
12,970,801,263,071
fc05caa53d75d0343684f8a9333473abc47eaad8
010bf55a456e8efb1fa01a5a8ecc9935586172b2
/test_nom.py
df1f014f830108fd70eff4dc0d2d3d6675328f6d
[]
no_license
https://github.com/GregoryElliott/TGMA_NLP_Project
4b8007b28ccdb78e5ec794683b3d904a1d6c8ce6
15bc517287f7d822b815c84528f7fe67aaa4dfb1
refs/heads/master
2021-01-10T17:41:33.627749
2016-02-22T22:58:53
2016-02-22T22:58:53
51,664,371
0
1
null
false
2016-02-22T22:58:54
2016-02-13T20:28:42
2016-02-13T20:29:40
2016-02-22T22:58:53
12,435
0
0
2
Python
null
null
import gg_api
import json

with open('gg2013answers.json') as f:
    j = json.load(f)

mine = gg_api.get_nominees(2013)

for award in mine:
    if j['award_data'][award]['nominees'] != mine[award]:
        print 'award: ' + award
        print 'correct: ' + str(j['award_data'][award]['nominees'])
        print 'mine: ' + str(mine[award])
UTF-8
Python
false
false
316
py
12
test_nom.py
7
0.648734
0.623418
0
13
23.307692
61
amoux/david
8,675,833,949,386
d38a017589f11a094a85b2ac9b2d4e681b9f5001
7a3ef0e0643cbf23defd3d48b8496037b0373500
/david/text/_proto_tokenizers.py
47a50f21d15abf3698936e4b79403bd89f735e49
[]
no_license
https://github.com/amoux/david
d1ef452ebdc31d99555cab15cbbb472c35612a94
825f30806696e5c77f669ec936fda0e8db7829f3
refs/heads/master
2023-08-03T22:42:51.332841
2021-10-06T18:57:37
2021-10-06T18:57:37
199,486,952
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" david.text._proto_tokenizers. ---------------------------- This module is a temporary hidden module to all the tokenizers until all are optimized properly (And simplified!). In the meantime the true tokenizers module will hold working Tokenizers. One by one. """ from __future__ import print_function, unicode_literals import os import re from collections import Counter from string import ascii_letters from typing import IO, Dict, Iterator, List, Optional, Union import spacy import torch from nltk.tokenize.casual import (EMOTICON_RE, HANG_RE, WORD_RE, _replace_html_entities, reduce_lengthening, remove_handles) from .preprocessing import normalize_whitespace, unicode_to_ascii class VocabularyBase(object): sos_special_token: int = 0 eos_special_token: int = 1 def __init__(self, name: str = None): self.name = name self.word2index: Dict[str, int] = {} self.word2count: Dict[str, int] = {} self.index2word: Dict[int, str] = { self.sos_special_token: "SOS", self.eos_special_token: "EOS", } self.num_words: int = 2 # count for both SOS and EOS tokens. def add_words_from_split(self, sentence: str) -> None: for word in sentence.split(): self.add_word(word) def iter_words_from_tokenizer(self, word: str) -> None: self.add_word(word) def add_word(self, word: str) -> None: if word not in self.word2index: self.word2index[word] = self.num_words self.word2count[word] = 1 self.index2word[self.num_words] = word else: self.word2count[word] += 1 class CharacterTokenizer(VocabularyBase): STRING_CHARACTERS: str = ascii_letters + " .,;'" def __init__(self): super().__init__("CharTokenizerVocab") def get_character_id(self, character: str) -> int: """Finds character index from STRING_LETTERS, e.g. "a" = 0""" return self.STRING_CHARACTERS.find(character) def character_to_tensor(self, character: str): """Turn a single character into a <1 x n_characters> Tensor""" char_size = 1 if len(character) != char_size: raise ValueError(f"Letter size must be = 1, not: {len(character)}") num_characters = len(self.STRING_CHARACTERS) tensor = torch.zeros(char_size, num_characters) tensor[0][self.get_character_id(character)] = char_size return tensor def word_to_tensor(self, sequence: str): """Turn a string sequence into an array of one-hot char vectors.""" char_size = 1 sequence_size = len(sequence) num_letters = len(self.STRING_CHARACTERS) tensor = torch.zeros(sequence_size, char_size, num_letters) for i, char in enumerate(sequence): tensor[i][0][self.get_character_id(char)] = char_size return tensor class WordTokenizer(CharacterTokenizer): def __init__( self, preserve_case: bool = True, reduce_len: bool = False, strip_handles: bool = False, ): super() self.preserve_case = preserve_case self.reduce_len = reduce_len self.strip_handles = strip_handles def tokenize(self, sequence: str) -> List[str]: sequence = _replace_html_entities(sequence) if self.strip_handles: sequence = remove_handles(sequence) if self.reduce_len: sequence = reduce_lengthening(sequence) safe_seq = HANG_RE.sub(r"\1\1\1", sequence) words = WORD_RE.findall(safe_seq) if not self.preserve_case: emoji = EMOTICON_RE.search words = list(map((lambda w: w if emoji(w) else w.lower()), words)) return words class SentenceTokenizer(VocabularyBase): """Sentence tokenizer built with spacy. Usage: >>> tokenizer = SentenceTokenizer() >>> text = ("Hello world's this is one sentence!. " "Yes? and another sentence.") >>> sent_tokens = tokenizer.tokenize(text) ... ['<start> hello world s this is one sentence ! . <end>', '<start> yes ? <end>', '<start> and another sentence . 
<end>'] """ def __init__(self): super().__init__("SentTokenizerVocab") def pad_punctuation(self, sequence: str, special_tokens: bool = False): """Padding punctuation with white spaces keeping the punctuation.""" string = unicode_to_ascii(sequence.lower().strip()) string = re.sub(r"([?.!,¿])", r" \1 ", string) string = re.sub(r'[" "]+', " ", string) string = re.sub(r"[^a-zA-Z?.!,¿]+", " ", string) string = string.rstrip().strip() if special_tokens: string = "<start> " + string + " <end>" return string def tokenize( self, sequence: Union[List[str], str], special_tokens: bool = True, lang: str = "en_core_web_sm", ) -> Iterator[List[str]]: """Basic sentence tokenizer with the option to add a <start> and an <end> special token to the sentence so that the model know when to start and stop predicting. """ if isinstance(sequence, str): sequence = [sequence] nlp = spacy.load(lang) for doc in nlp.pipe(sequence): for sent in doc.sents: sent = sent.text.strip() if special_tokens: yield self.pad_punctuation(sent, special_tokens=True) else: yield sent class BaseTokenizer(object): """Base tokenizer class for all tokenizers.""" def __init__( self, vocab_file: Optional["vocab.txt"] = None, document: Optional[List[str]] = None, ): """ vocab_file: Either load an existing vocabulary of tokens. document: Or load from an iterable list of string sequences. preprocess: Normalize whitespace and enforce ASCII. tokenizer: Callable method. If None, WordTokenizer is used. """ self.tokens_to_ids: Dict[str, int] = {} self.token_counter: Dict[str, int] = Counter() self._num_tokens: int = 0 self._string_normalizer: object = None if vocab_file: self.from_file(vocab_file) elif document: self.from_doc(document) def add_token(self, token: Union[List[str], str]): """Add a single string token (word) to the vocabulary.""" # A List[token] can be added only of theres only one string token. if isinstance(token, list) and len(token) == 1: token = str(token[0]) if token not in self.tokens_to_ids: self.tokens_to_ids[token] = self._num_tokens self._num_tokens += 1 self.token_counter[token] = 1 else: self.token_counter[token] += 1 def to_file(self, file_name="vocab.txt") -> IO: """Saves tokens to vocabulary text file.""" with open(file_name, "w") as vocab_file: for token in self.tokens_to_ids.keys(): vocab_file.write(f"{token}\n") def from_file(self, file_name="vocab.txt") -> IO: """Add tokens from a vocaulary text file.""" with open(file_name, "r") as vocab_file: for token in vocab_file: self.add_token(token.replace("\n", "")) def from_doc(self, document: List[str]) -> None: """Add tokens from an iterable of string sequences.""" for string in document: string = self._string_normalizer(string) tokens = self.tokenize(string) for token in tokens: self.add_token(token) def _encode(self, string: str) -> List[int]: """Converts a string in a sequence of integer ids using the tokenizer and vocabulary. NOTE: whitespace and ASCII normalization is applied. 
""" tok2id = self.tokens_to_ids string = self._string_normalizer(string) tokens = self.tokenize(string) return [tok2id[token] for token in tokens if token in tok2id] def _decode(self, tokens: List[int]) -> List[str]: # Root vocab decoder, converts a sequence of toks[id] => toks[str] id2tok = {i: t for t, i in self.tokens_to_ids.items()} tokens = [id2tok[index] for index in tokens if index in id2tok] return tokens def convert_tokens_to_string( self, tokens: List[str], clean_tokenization=True ) -> str: """Convert a sequence of tokens (string) in a single string.""" string = " ".join(tokens) if clean_tokenization: string = BaseTokenizer.clean_tokenization(string) return string def convert_tokens_to_ids(self, tokens: List[str]) -> List[int]: """Converts a single or a list of string tokens, in a single int:id.""" tok2id = self.tokens_to_ids return [tok2id[token] for token in tokens if token in tok2id] def convert_ids_to_tokens(self, tokens: List[int]) -> List[str]: """Converts a single index or a sequence of indices (integers) in a token, using the vocabulary and tokens. """ return self._decode(tokens) @staticmethod def clean_tokenization(string: str) -> str: """Clean up spaces before punctuations and abreviated forms.""" string = ( string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" do not", " don't") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") .replace(" / ", "/") ) return string class SocialMediaTokenizer(BaseTokenizer): r"""Social media aware tokenizer. Usage: >>> tokenizer = SocialMediaTokenizer('yt-comments-md') '< SocialMediaTokenizer(tokens=63844) >' >>> text = "Hello world! This is a social media tokenizer 🤗" >>> tokenized_text = tokenizer.tokenize(text) ['hello', 'world', '!', 'this', 'is', 'a', 'social', 'media', 'tokenizer', '🤗'] >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) [477, 467, 164, 9, 49, 22, 3641, 4202, 1809] >>> tokenizer.convert_ids_to_tokens(indexed_tokens) ['hello', 'world', '!', 'this', 'is', 'a', 'social', 'media', '🤗'] ## Adding a new token if its not in the vocabulary. >>> tokenizer.add_token("tokenizer") # or ["tokenizer"] of len=1 >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) [477, 467, 164, 9, 49, 22, 3641, 4202, 63844, 1809] ## The `token="tokenizer"` was added to `index=63844`. >>> tokenizer.convert_ids_to_tokens(indexed_tokens) ['hello', 'world', '!', 'this', 'is', 'a', 'social', 'media', 'tokenizer', '🤗'] """ SM_TOKENIZER_MODELS = {"yt-comments-md": YTCommentsConfig.VOCAB_FILE} def __init__( self, vocab_file: Optional["vocab.txt"] = None, document: Optional[List[str]] = None, preserve_case=False, reduce_len=False, strip_handles=False, ): # This is not a real implementation! 
if vocab_file in self.SM_TOKENIZER_MODELS: vocab_file = self.SM_TOKENIZER_MODELS[vocab_file] self.preserve_case = preserve_case self.reduce_len = reduce_len self.strip_handles = strip_handles self._string_normalizer = self.normalize_string super().__init__(vocab_file, document) def normalize_string(self, string: str) -> str: if not self.preserve_case: string = string.lower() return normalize_whitespace(unicode_to_ascii(string)) def tokenize(self, string: str) -> List[str]: string = _replace_html_entities(string) if self.strip_handles: string = remove_handles(string) if self.reduce_len: string = reduce_lengthening(string) safe_string = HANG_RE.sub(r"\1\1\1", string) tokens = WORD_RE.findall(safe_string) if not self.preserve_case: emoji = EMOTICON_RE.search tokens = list( map((lambda token: token if emoji(token) else token.lower()), tokens) ) return tokens def __repr__(self): return f"< SocialMediaTokenizer(tokens={self._num_tokens}) >"
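A hedged round-trip sketch for `SocialMediaTokenizer`, built from an in-memory document rather than the `yt-comments-md` vocab (whose `YTCommentsConfig` is not defined in this module); it only runs inside the package, since the class pulls helpers from `.preprocessing`.

# Round-trip sketch: build the vocab from a tiny in-memory corpus.
docs = ["Hello world!", "hello again, world"]
tok = SocialMediaTokenizer(document=docs)

ids = tok._encode("hello world !")
print(ids)                              # [0, 1, 2] for this corpus
print(tok.convert_ids_to_tokens(ids))   # ['hello', 'world', '!']
print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))  # 'hello world!'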
UTF-8
Python
false
false
12,674
py
73
_proto_tokenizers.py
53
0.577488
0.568799
0
343
35.909621
85
dhvalden/phd
18,305,150,622,166
52b3ba434107633336a63e609f072b343bc92d50
e9a73efe52ac851867f886f8a32f201a1e6fe3b5
/2_data_preparation/4_filterbylang.py
6d0bc882be72178e65b371fcbdfacccfe4bcf9f9
[]
no_license
https://github.com/dhvalden/phd
b4104ca9d4e41eb0e252b73a4defd16ab6400f46
dd9b32e6773aa422d17c50b6b38ea40c4d4fe8f6
refs/heads/master
2021-11-27T07:26:47.275561
2021-11-23T03:32:46
2021-11-23T03:32:46
252,271,068
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3

import os
import sys
import argparse
import ujson

"""
This script receives json files separated by social movement
and outputs json files with records in only the selected language.
"""


def validate_file(file_name):
    """
    validate file name and path.
    """
    MSG_INVALID_PATH = "Error: Invalid file path/name. Path %s does not exist."
    if not valid_path(file_name):
        print(MSG_INVALID_PATH % (file_name))
        quit()
    return


def valid_path(path):
    # validate file path
    return os.path.exists(path)


def filterlang(args):
    input_file = args.inputf
    output_file = args.outputf
    lang = args.lang
    counter = 0
    validate_file(input_file)
    sys.stdout.write("Filtering %s... " % input_file)
    with open(input_file, "r", encoding="utf-8") as f,\
            open(output_file, "w", encoding="utf-8") as outf:
        for line in f:
            tweet = ujson.loads(line)
            if tweet["lang"] == lang:
                outf.write(ujson.dumps(tweet, ensure_ascii=False) + "\n")
                counter += 1
    sys.stdout.write("Done! %s tweets filtered.\n" % counter)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputf", type=str,
                        help="Input file")
    parser.add_argument("-o", "--outputf", type=str,
                        help="Output file")
    parser.add_argument("-l", "--lang", type=str,
                        help="Filter Language")
    args = parser.parse_args()

    if args.inputf is not None and args.outputf is not None:
        filterlang(args)


if __name__ == '__main__':
    main()
UTF-8
Python
false
false
1,644
py
48
4_filterbylang.py
37
0.591241
0.5882
0
69
22.826087
79
aayushmdr/financetracker
6,640,019,452,106
51d21dfc7c6dde731e2f04158cf8e24d93d352ff
fbf677633c87280645ddb3f3c32e64bf4b01ca13
/expense/views.py
972a4b71d69b4074bf65c98d10dddb87522af69f
[]
no_license
https://github.com/aayushmdr/financetracker
f1a1b8ff8c020eab4287756f65bd88e180aa0292
85768885f7f619e7b7f30fed8293d94f624bb3e8
refs/heads/master
2023-04-11T05:00:08.390560
2021-04-23T10:59:19
2021-04-23T10:59:19
347,015,780
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render, redirect
from .models import *
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import *
from .filters import *


def login_success(request):
    samt = StartAmt.objects.filter(user=request.user)
    if not samt:
        return redirect('start')
    else:
        return redirect('home')


@login_required()
def start(request):
    if request.method == 'POST':
        form = StartForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            return redirect('home')
    else:
        form = StartForm()
    return render(request, 'expense/start.html', {'form': form})


@login_required()
def index(request):
    balance = ''
    difference = ''
    expenses = Expense.objects.filter(user=request.user)
    expenses1 = Expense.objects.filter(user=request.user)
    all_incomes = Income.objects.filter(user=request.user)
    startamt = StartAmt.objects.filter(user=request.user)
    firstFiveexp = Expense.objects.filter(user=request.user).order_by('-date')[:5]
    firstFiveinc = Income.objects.filter(user=request.user).order_by('-date')[:5]
    categories = Category.objects.all()
    sum1 = 0
    a = 0
    for exp in expenses:
        sum1 += exp.amount
    print(sum1)
    Incomesum = 0
    for income in all_incomes:
        Incomesum += income.amount
    for bal in startamt:
        a = bal.amount
    if a > 0:
        balance = (a + Incomesum) - sum1
        difference = sum1 - (Incomesum + a)
    list1 = []
    catlist = []
    b = 1
    sum = 0
    for cat in categories:
        cat1 = Expense.objects.filter(category=b, user=request.user)
        cat2 = Category.objects.get(id=b)
        catlist.append(cat2)
        for cat in cat1:
            sum += cat.amount
        list1.append(sum)
        sum = 0
        b += 1

    context = {
        'expenses': expenses,
        'incomes': all_incomes,
        'totalexp': sum1,
        'totalIncome': Incomesum,
        'diff': difference,
        'firstFiveexp': firstFiveexp,
        'firstFiveinc': firstFiveinc,
        'balance': balance,
        'start': startamt,
        'list': list1,
        'catlist': catlist,
    }
    return render(request, 'expense/index.html', context)


@login_required()
def expenseDetail(request):
    expenses = Expense.objects.filter(user=request.user).order_by('-date')
    categories = Category.objects.all()
    myFilter = ExpenseFilter(request.GET, queryset=expenses)
    expenses = myFilter.qs
    list1 = []
    catlist = []
    b = 1
    sum = 0
    for cat in categories:
        cat1 = Expense.objects.filter(category=b, user=request.user)
        cat2 = Category.objects.get(id=b)
        catlist.append(cat2)
        for cat in cat1:
            sum += cat.amount
        list1.append(sum)
        sum = 0
        b += 1

    context = {
        'expenses': expenses,
        'sum': sum,
        'categories': categories,
        'list': list1,
        'catlist': catlist,
        'myFilter': myFilter
    }
    return render(request, 'expense/expensesdetail.html', context)


@login_required()
def detailExpense(request, pk):
    expense = Expense.objects.get(id=pk)
    context = {
        'expense': expense,
    }
    return render(request, 'expense/expensedetail.html', context)


@login_required()
def addExpense(request):
    if request.method == 'POST':
        form = ExpenseForm(request.POST, request.FILES)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            return redirect('all_expenses')
    else:
        form = ExpenseForm()
    return render(request, 'expense/addexpense.html', {'form': form, 'catform': AddCategoryForm})


def addCategory(request):
    if request.method == 'POST':
        form = AddCategoryForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            return redirect('add_expense')
    else:
        form = AddCategoryForm()
    return render(request, 'expense/addexpense.html', {'catform': AddCategoryForm})


def updateExpense(request, pk):
    expense = Expense.objects.get(id=pk)
    form = ExpenseForm(instance=expense)
    if request.method == 'POST':
        form = ExpenseForm(request.POST, instance=expense)
        if form.is_valid():
            form.save()
            messages.success(request, 'Item updated successfully!')
            return redirect('all_expenses')
    context = {
        'form': form
    }
    return render(request, 'expense/update_expense.html', context)


def deleteExpense(request, pk):
    expense = Expense.objects.get(id=pk)
    if request.method == 'POST':
        expense.delete()
        messages.success(request, 'Item deleted!')
        return redirect('all_expenses')
    context = {
        'expense': expense
    }
    return render(request, 'expense/delete.html', context)


@login_required()
def detailIncome(request, pk):
    income = Income.objects.get(id=pk)
    context = {
        'income': income,
    }
    return render(request, 'expense/single_income.html', context)


def incomeDetail(request):
    incomes = Income.objects.filter(user=request.user)
    categories = IncomeCategory.objects.all()
    myFilter = IncomeFilter(request.GET, queryset=incomes)
    incomes = myFilter.qs
    context = {
        'incomes': incomes,
        'myFilter': myFilter
    }
    return render(request, 'expense/incomedetail.html', context)


@login_required()
def addIncome(request):
    if request.method == 'POST':
        form = IncomeForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            return redirect('all_incomes')
    else:
        form = IncomeForm()
    return render(request, 'expense/addincome.html', {'form': form})


@login_required()
def updateIncome(request, pk):
    income = Income.objects.get(id=pk)
    form = IncomeForm(instance=income)
    if request.method == 'POST':
        form = IncomeForm(request.POST, instance=income)
        if form.is_valid():
            form.save()
            messages.success(request, 'Item updated successfully!')
            return redirect('all_incomes')
    context = {
        'form': form
    }
    return render(request, 'expense/update_income.html', context)


@login_required()
def deleteIncome(request, pk):
    income = Income.objects.get(id=pk)
    if request.method == 'POST':
        income.delete()
        messages.success(request, 'Item deleted!')
        return redirect('all_incomes')
    context = {
        'income': income
    }
    return render(request, 'expense/income_delete.html', context)
UTF-8
Python
false
false
6,922
py
15
views.py
7
0.612973
0.607917
0
260
25.626923
95
grantgasser/Task1b
6,631,429,551,815
3a0be5876acabb9d94912c7173585fb6aa5c85f6
990fe671b2d83f5ee784b7f47484ca09974a1a8b
/task_1b_anna.py
ae5016db6bf1be58a597f2daefd7dd6a9cabbced
[]
no_license
https://github.com/grantgasser/Task1b
1ffed9ba15e26e906ccbd592c8ea9645be1e786d
c498eeffb0f1eaa8e7f2007059a8fa99db77abd5
refs/heads/master
2021-09-10T12:44:28.427234
2018-03-26T13:40:17
2018-03-26T13:40:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn import linear_model

trainfilename = '/Users/Anna/polybox/IntroductionML/Tasks/01/task1b_ow9d8s/train.csv'
trainfile = pd.read_csv(trainfilename, delimiter=',')
X = trainfile.drop(['Id', 'y'], axis=1)
y = trainfile['y']

X = np.array(X)  # 900x5
y = np.array(y)

phi = X                                            # 900x5
phi = np.append(phi, X**2, axis=1)                 # 900x10
phi = np.append(phi, np.exp(X), axis=1)            # 900x15
phi = np.append(phi, np.cos(X), axis=1)            # 900x20
phi = np.append(phi, np.ones((900, 1)), axis=1)    # 900x21

# we need to split into a test set and a train set
# to do so we could use e.g. k-fold cross validation --> often k=5 or k=10
kf = KFold(n_splits=10)

regr = linear_model.LinearRegression()
#regr = linear_model.Lasso(alpha=1)
#regr = Ridge(alpha=0.1)

RMSE = []
possibleWeight = []
for train_index, test_index in kf.split(phi):
    weights = []
    X_train, X_test = phi[train_index], phi[test_index]
    y_train, y_test = y[train_index], y[test_index]
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    RMSE.append(mean_squared_error(y_pred, y_test)**0.5)
    weights = regr.coef_
    possibleWeight.append(weights)

print(RMSE)
print(np.min(RMSE))
indxminRMSE = np.argmin(RMSE)
print(indxminRMSE)

# output results
d = {'weight': possibleWeight[indxminRMSE]}
output = pd.DataFrame(d)
output.to_csv('task_1b_output.csv', index=False, header=False)

# Insights:
# RMSE with linear regression: min(RMSE) = 9.716
# RMSE with Ridge regression, alpha = 0.1 -> min(RMSE) = 9.7154
# RMSE with Lasso regression, alpha = 0.1 -> min(RMSE) = 9.686
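Given the best fold's 21 coefficients, a prediction for a new 5-feature sample applies the same feature map. A hypothetical helper follows (not in the original script); note scikit-learn also fits a separate intercept, `regr.intercept_`, which the constant feature partly duplicates, so this sketch ignores it.

# Hypothetical helper: apply the same 21-feature map to one 5-d sample.
def phi_row(x):
    # x: array of shape (5,); mirrors [x, x^2, exp(x), cos(x), 1]
    return np.concatenate([x, x**2, np.exp(x), np.cos(x), [1.0]])

w = possibleWeight[indxminRMSE]   # best fold's coefficients, shape (21,)
x_new = np.zeros(5)               # placeholder sample
y_hat = phi_row(x_new) @ w        # intercept term omitted in this sketch
print(y_hat)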
UTF-8
Python
false
false
1,672
py
5
task_1b_anna.py
2
0.712919
0.670455
0
58
27.827586
85
torebre/bpl_python
9,921,374,481,462
c79022144cc95d0a6a2f3a1bc4188730d475ca58
7dec3fbe0e31e977a1a0dd7cdc80f01871e82dbe
/bpl/PS.py
049f577ea6ed65cd400927eb0e02f2b069edcb5b
[]
no_license
https://github.com/torebre/bpl_python
106872180ff4eed745d51cf91c60ceeb49360c6c
9be76d0844feab7f518bbdfc81a9b35e3d02360f
refs/heads/master
2020-03-07T00:59:24.864518
2018-04-05T20:09:59
2018-04-05T20:09:59
127,171,326
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# function PM = defaultps
class PS:

    # Library to use
    libname = 'library'

    # Model parameters
    K = 5  # number of particles to use in search algorithm

    # image model parameters
    ink_ncon = 2  # number of convolutions
    imsize = (105, 105)  # image size
    ink_pp = 2  # amount of ink per point
    ink_max_dist = 2  # distance between points to which you get full ink
    ink_a = 0.5  # ink parameter 1
    ink_b = 6  # ink parameter 2

    # Creating a trajectory from a spline
    spline_max_neval = 200  # maximum number of evaluations
    spline_min_neval = 10  # minimum
    spline_grain = 1.5  # 1 traj. point for every this many units (pixel distance)

    # Max / min noise parameters for image model
    max_blur_sigma = 16  # blur kernel width
    min_blur_sigma = 0.5
    max_epsilon = 0.5  # pixel flipping
    min_epsilon = 1e-4

    # search parameters
    max_affine_scale_change = 2  # scale changes must be less than a factor of 2
    max_affine_shift_change = 50  # shift changes must be less than this

    # MCMC PARAMETERS

    # details about the chain
    mcmc_samp_type_chain = 200  # number of samples to take in the MCMC chain (for classif.)
    mcmc_nsamp_type_store = 10  # number of samples to store from this chain (for classif.)
    mcmc_nsamp_token_chain = 25  # for completion (we take last sample in this chain)

    # mcmc proposal parameters (Note these are based on lib.tokenvar
    # parameters, although here they are hard-coded for convenience)
    mcmc_prop_gpos_sd = 1  # global position move
    mcmc_prop_shape_sd = 3 / 2  # shape move
    mcmc_prop_scale_sd = 0.0235  # scale move
    mcmc_prop_relmid_sd = 0.2168  # attach relation move
    mcmc_prop_relpos_mlty = 2  # multiply the sd of the standard position noise by this to propose new positions from prior

# end
UTF-8
Python
false
false
1,811
py
31
PS.py
31
0.681944
0.651022
0
46
38.369565
122
firewut/data-transform-pipelines-api
5,325,759,495,397
a003f2715c0c7cc4a0597fc3f717fce88af1c360
c1868c08b8a5e5754cf2ac35da2edf8a0b188459
/src/projects/workers/random_image.py
21c0c090b2d6057d908dd8eabc81f470f912003c
[ "MIT" ]
permissive
https://github.com/firewut/data-transform-pipelines-api
336163cf86423be6b2ebaec73fccd2fb5294ef33
8c46bf72759478a1bdac3323c88ae7e9e1e7714b
refs/heads/master
2022-02-05T10:20:19.697745
2022-01-26T19:02:11
2022-01-26T19:02:11
154,260,980
2
0
MIT
false
2018-11-27T20:35:22
2018-10-23T04:14:46
2018-11-27T12:10:16
2018-11-27T20:35:22
816
0
0
0
Python
false
null
from PIL import Image
import numpy

from projects.workers.base import Worker


class RandomImage(Worker):
    id = "random_image"
    name = "random_image"
    image = "https://upload.wikimedia.org/wikipedia/commons/e/e1/Ideal_chain_random_walk.png"
    description = "Make a random Image"
    ui_schema = {
        "ui:order": [
            "width",
            "height",
        ],
    }
    schema = {
        "type": "object",
        "required": ["in_config"],
        "properties": {
            "in": {"type": "null", "description": "takes no input data"},
            "in_config": {
                "type": "object",
                "properties": {
                    "width": {
                        "type": "integer",
                        "minimum": 0,
                    },
                    "height": {
                        "type": "integer",
                        "minimum": 0,
                    },
                },
                "required": ["width", "height"],
            },
            "in_config_example": {"width": 100, "height": 100},
            "out": {
                "description": "randomized image",
                "type": "file",
            },
        },
    }

    def process(self, data=None):
        in_config = self.pipeline_processor.in_config
        width = in_config.get("width")
        height = in_config.get("height")

        imarray = numpy.random.rand(height, width, 3) * 255
        image = Image.fromarray(imarray.astype("uint8")).convert("RGBA")

        _file = self.request_file()
        image.save(_file.path, "png")
        image.close()
        return _file
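# The image-generation core can be exercised without the Worker/pipeline
# plumbing (a standalone sketch, not part of the worker itself):
#
#     import numpy
#     from PIL import Image
#     arr = numpy.random.rand(100, 100, 3) * 255   # height x width x RGB floats
#     Image.fromarray(arr.astype("uint8")).convert("RGBA").save("random.png", "PNG")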
UTF-8
Python
false
false
1,692
py
90
random_image.py
79
0.43026
0.421986
0
58
27.172414
93
mono-g/mayaTool
10,230,612,145,711
79b7aacbd85109bc7c5a937378b970017feb4558
d8133d7629d94c9d7f0837ec4ca4374024f570bb
/cleanup_preference/cleanup_preference.py
1d408c5a5b33f6129dc13e4e9693e3902fa6fc2d
[]
no_license
https://github.com/mono-g/mayaTool
1f3827b9f002e3c27a4c44629b3f805da4f4f159
5a73907480a5a630accd0878a03a796ca51d9f2c
refs/heads/master
2021-04-09T10:45:17.758521
2019-12-13T11:07:43
2019-12-13T11:07:43
125,455,826
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################
#
# @brief   clean up preference
# @file    cleanup_preference.py
# @author  Satoshi Gonokami
#
# Copyright(C) 2018 Satoshi Gonokami.
#
# [log]\n
# 2018/03/22 created\n
#
##########################################################
from __future__ import absolute_import, division, print_function

import os
import sys
import shutil
import subprocess
from datetime import datetime

scriptDir = os.path.abspath(os.path.dirname(__file__)).replace('\\', '/')
prefDir = os.environ['USERPROFILE'].replace('\\', '/') + '/Documents/maya/[mayaVer]/prefs'
mayaexe = os.path.dirname(os.environ['MAYA_LOCATION']) + '/Maya[mayaVer]/bin/maya.exe'
windowPrefFiles = ['shelves',
                   'windowPrefs.mel']
hotkeyPrefFiles = ['hotkeys',
                   'userRunTimeCommands.mel',  # a bit questionable
                   'userNamedCommands.mel']


# ---------------------------------------------------------
# make directory (if directory doesn't exist)
# ---------------------------------------------------------
# @param <str>directory : directory
# @return None
def makeDir(directory):

    if not os.path.exists(directory):
        os.makedirs(directory)
        print("# make Directory : '" + directory + "' #")


# ---------------------------------------------------------
# check process name
# ---------------------------------------------------------
# @param <str>procName : process name
# @return <str/List>pidList : pid list
def checkProcess(procName):

    procParamList = []
    pidList = []
    cmd = ['tasklist', '/FO', 'CSV', '/NH',
           '/FI', ('IMAGENAME eq ' + procName + '.exe')]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    for line in proc.stdout:
        procParamList.append(line.replace("\r\n", '').replace('"', '').split(","))
    for pparam in procParamList:
        if len(pparam) > 1:
            pidList.append(pparam[1])

    return pidList


# ---------------------------------------------------------
# task kill by PID
# ---------------------------------------------------------
# @param <str>pid : process ID
# @return <bool>
def taskKill(pid):

    killcmd = ['taskkill', '/pid', pid, '/f']
    try:
        subprocess.Popen(killcmd)
        return True
    except Exception:
        return False


# ---------------------------------------------------------
# back up directory
# ---------------------------------------------------------
# @param <str>tgtDir : target directory
# @return <str>backDir : back up directory
def backupDirs(tgtDir):

    filetimestamp = os.stat(tgtDir).st_mtime
    filetime = datetime.fromtimestamp(filetimestamp).strftime('%Y%m%d%H%M')
    backDir = tgtDir + '_' + filetime
    try:
        os.rename(tgtDir, backDir)
        # Maya checks the prefs folder to decide whether to migrate old
        # settings, so create it in advance to keep the dialog from appearing
        makeDir(tgtDir)
        return backDir
    except Exception:
        print("## ERR : Couldn't back up preference ##")
        return None


# ---------------------------------------------------------
# start up and close maya
# ---------------------------------------------------------
# @param <str>mayaVer : maya version
# @return None
def runMaya(mayaVer):

    global mayaexe
    curMayaexe = mayaexe.replace('[mayaVer]', mayaVer)
    cmd = [curMayaexe.replace('maya.exe', 'mayabatch.exe'),
           '-command', 'quit -force;']
    pipe = subprocess.Popen(cmd, stderr=subprocess.PIPE)
    outs, errs = pipe.communicate()


# ---------------------------------------------------------
# copy setting files
# ---------------------------------------------------------
# @param <str>curPrefDir : preference directory
# @param <str>backDir : back up directory
# @param <bool>hkeyFlag : flag restoring hotkey
# @return None
def copySetting(curPrefDir, backDir, hkeyFlag=True):

    global windowPrefFiles
    global hotkeyPrefFiles
    if hkeyFlag is True:
        tgPrefFiles = windowPrefFiles + hotkeyPrefFiles
    else:
        tgPrefFiles = windowPrefFiles
    for tpref in tgPrefFiles:
        backPrefpath = backDir + '/' + tpref
        prefpath = curPrefDir + '/' + tpref
        if os.path.exists(backPrefpath):
            if os.path.isdir(backPrefpath):
                if os.path.exists(prefpath):
                    shutil.rmtree(prefpath)
                shutil.copytree(backPrefpath, prefpath)
            else:
                shutil.copy2(backPrefpath, prefpath)


# ---------------------------------------------------------
# main
# ---------------------------------------------------------
# @param <str>mayaVer : maya version
# @param <bool>hkeyFlag : flag restoring hotkey
# @return None
def main(mayaVer, hkeyFlag=True):

    global prefDir

    # err check
    if not mayaVer.isdigit():
        print("## ERR : Please Enter a Number ##")
        return
    curPrefDir = prefDir.replace('[mayaVer]', mayaVer)
    if not os.path.exists(curPrefDir):
        print("## ERR : Please Enter Installed Maya version ##")
        return

    # close maya
    pidList = checkProcess('maya')
    if pidList:
        print("# Maya is Running. #")
        print("# May I force shut-down Maya? #")
        inputStr = raw_input('>> y/n :')
        if inputStr == 'y' or inputStr == 'yes':
            for pid in pidList:
                taskKill(pid)
        else:
            print("# Canceled Task. #")
            print("# Please shut-down Maya. #")
            return

    # back up prefs
    backDir = backupDirs(curPrefDir)
    if backDir is None:
        return

    # start up maya
    pipe = runMaya(mayaVer)

    # copy setting files
    copySetting(curPrefDir, backDir, hkeyFlag)


# ----------------------------------------------------------------------------
if __name__ == '__main__':
    if (sys.argv[2] == 'True' or sys.argv[2] == '1'):
        argv = True
    elif sys.argv[2] == '0':
        argv = False
    else:
        argv = False
    main(sys.argv[1], argv)
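# Usage sketch (from the __main__ block above): run from a Windows command
# prompt with the Maya version and a hotkey-restore flag, e.g.
#
#     python cleanup_preference.py 2018 True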
UTF-8
Python
false
false
6,046
py
9
cleanup_preference.py
6
0.507239
0.503535
0
192
29.9375
90
FYP-2018/Text-Processing
15,607,911,197,356
920fcc22ecd02d0fee1621a5c640fcae660c5e5e
fc2c91e0fa20894323600787116e99a82c2df79d
/Corpus.py
ec8610543cf2940f2f2e068bc8633f02f9dbc2b3
[]
no_license
https://github.com/FYP-2018/Text-Processing
90fcb76589c325bc7db3fb63d712b0eea573837b
1818475bd2d168a4368e2d8964b9bedeaab32eb4
refs/heads/master
2020-04-09T12:34:31.251533
2018-12-05T04:10:24
2018-12-05T04:10:24
160,355,844
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import thulac

from PreprocessUtils import readFile


class Corpus():
    def __init__(self):
        pass

    def _load_thulac(self, user_dict=None):
        print("initializing thulac...")
        if user_dict == None:
            self.thu = thulac.thulac(seg_only=True)
        else:
            self.thu = thulac.thulac(user_dict=user_dict, seg_only=True)

    def parse(self, inp_dirs, outp_dirs, type='word', user_dict=None):
        """
        Args:
            param1 (str): a list of input file directories
            param2 (str): a list of expected output file directories
            param3 (:obj:`str`, optional): options for parsing criteria, 'word'/'char'
                Defaults to word.
                'word' - segment
                'char' - return each line of doc as a list of word
        """
        if len(inp_dirs) != len(outp_dirs):
            raise ValueError("Number of input directories should be same with output directories")
        if type != 'word' and type != 'char':
            raise ValueError("Invalid parsing type! Need to be either 'word' or 'char'")

        if type == 'word':
            self._load_thulac(user_dict=user_dict)
            for inp_dir, outp_dir in zip(inp_dirs, outp_dirs):
                print("converting {} to {}".format(inp_dir, outp_dir))
                self.thu.cut_f(inp_dir, outp_dir)

        elif type == 'char':
            for inp_dir, outp_dir in zip(inp_dirs, outp_dirs):
                print("converting {} to {}".format(inp_dir, outp_dir))
                fout = open(outp_dir, 'w', encoding='utf-8')
                for line in readFile(inp_dir, format='plain'):
                    line = ' '.join(line)  # since str is intrinsically a list of char
                    fout.write(line.strip() + '\n')
                fout.close()

        num_files = len(inp_dirs)
        print("processed {} docs".format(num_files))
        print("Finished Parsing")

    def clean_corpus(self, corp_inps, dict_inps, outps, dict_type='keep'):
        """
        Process the input corpus according to dict: either keep the words in
        dict or remove the words in dict

        Args:
            param 1, 2, 3 (list): a list of file directory for corpus, dict, and output corpus
            param 4 (str): 'keep' or 'remove'
                'keep': only keep the words in dict
                'remove': remove the words in dict from original corpus
        """
        for corp_inp, dict_inp, outp in zip(corp_inps, dict_inps, outps):
            print(corp_inp)

            with open(dict_inp, 'r', encoding='utf-8') as dict_f:
                dict = [w.strip() for w in dict_f.readlines()]
                print("{} words in loaded dict".format(len(dict)))

            num_file = 0
            with open(outp, 'w', encoding='utf-8') as outp_f:
                for line in readFile(corp_inp, format='list'):
                    if dict_type == 'keep':
                        line = [w for w in line if w in dict]
                    elif dict_type == 'remove':
                        line = [w for w in line if w not in dict]

                    if num_file % 50 == 0:
                        print("{} files processed".format(num_file))
                    num_file += 1
                    outp_f.write(' '.join(line) + '\n')
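# Usage sketch (file paths here are hypothetical):
#
#     corpus = Corpus()
#     corpus.parse(['raw/train.txt'], ['seg/train.txt'],
#                  type='word', user_dict='dict/terms.txt')
#     corpus.clean_corpus(['seg/train.txt'], ['dict/stopwords.txt'],
#                         ['seg/train.clean.txt'], dict_type='remove')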
UTF-8
Python
false
false
3,432
py
18
Corpus.py
6
0.508741
0.504371
0
85
39.376471
98
AkhilReddykasu/Functions
11,166,914,992,843
264eb65ab2d0199b58db3489d3cf29f2854a7a4b
0d1059d3195868be6feb0397d931fd5126f1bb65
/10_Sum of all num in list.py
18a85274094bd9754e347ded068a1197eea0272d
[]
no_license
https://github.com/AkhilReddykasu/Functions
62b4b6fa9b62f172c1fdfd206a7365d7b8ad94cb
c1af8db1350152168b1a5ac61c990ed589a9e129
refs/heads/master
2022-11-20T15:43:18.769387
2020-07-17T15:51:01
2020-07-17T15:51:01
280,464,611
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Sum of all numbers in list""" li = [] n = int(input("Enter the length of list:")) for i in range(0, n): ele = int(input('Enter the value:')) li.append(ele) print("Elements in the list:", li) def add_list(l): sum1 = 0 for j in l: sum1 += j return sum1 print("Sum of elements in list:", add_list(li))
UTF-8
Python
false
false
354
py
19
10_Sum of all num in list.py
19
0.550847
0.536723
0
18
17.666667
47
Jelly815/housepage
15,298,673,556,542
aaca946f8e19de37d2a2004e450bb37676a5b0a8
db3f9c53cc9e7005ce3a243f606d0723583532a9
/python/function.py
f9909281542c83e4a85b6b6da161c8d7e465148a
[]
no_license
https://github.com/Jelly815/housepage
efbd79749017e4217a6bc94b2429a419b384cc8d
0eca073fd4333fd53ad0868cb3751a612250fcaa
refs/heads/master
2023-03-31T19:20:15.363506
2023-03-22T08:39:55
2023-03-22T08:39:55
154,009,771
0
0
null
false
2023-03-22T08:39:57
2018-10-21T13:12:13
2019-03-28T03:03:46
2023-03-22T08:39:55
5,004
0
0
0
PHP
false
false
# -*- coding: utf-8 -*- """ Created on Sun Jan 13 00:58:08 2019 @author: Jelly """ from db_connect import DB_CONN import setting import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import datetime from collections import defaultdict class FUNC_CLASS(DB_CONN): def __init__(self,user_id): super().__init__() # 項目總數 self.items_len = len(setting.similar_list + setting.range_list) self.user_id = user_id # 取得A曾經評價低於普通的房子 get_bad_houses = self.get_bad_houses() self.bad_houses = ','.join((str(num) for num in get_bad_houses)) # 取得最早的上線時間 def get_user_login_time(self): login_day = 0 login_time_sql = """ SELECT `last_time` FROM `ex_record` ORDER BY `last_time` LIMIT 1 """ try: self.execute(login_time_sql) record_arr = self.fetchall() if record_arr: login_time = str(record_arr[0]['login_time']) today = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") start = datetime.datetime.strptime(login_time, '%Y-%m-%d %H:%M:%S') end = datetime.datetime.strptime(today, '%Y-%m-%d %H:%M:%S') login_day = (end - start).days except: login_day = 0 return login_day # 取得評分推薦權重 def get_weight(self): re_main = {} default_sql = """ SELECT (CASE WHEN ROUND(SUM(`math_`), 2) > 1 THEN 1 ELSE ROUND(SUM(`math_`), 2) END) AS 'weight' FROM `ex_score_analysis` WHERE """ #like_sql = default_sql + " `type_` = 'like' AND `value_` >= 3" user_sql = default_sql + " `type_` = 'user' AND `value_` >= 3" nolike_sql = default_sql + " `type_` = 'nolike' AND `value_` >= 3" content_sql = default_sql + " `type_` = 'content' AND `value_` >= 3" hot_sql = default_sql + " `type_` = 'hot' AND `value_` >= 3" new_sql = default_sql + " `type_` = 'new' AND `value_` >= 3" search_sql = default_sql + " `type_` = 'search' AND `value_` >= 3" try: #self.execute(like_sql,[]) #like_sql_arr = self.fetchall() #re_main['like'] = like_sql_arr[0]['weight'] if like_sql_arr[0]['weight'] else 0 self.execute(user_sql,[]) user_sql_arr = self.fetchall() re_main['user'] = user_sql_arr[0]['weight'] if user_sql_arr[0]['weight'] else 0 self.execute(nolike_sql,[]) nolike_sql_arr = self.fetchall() re_main['nolike'] = nolike_sql_arr[0]['weight'] if nolike_sql_arr[0]['weight'] else 0 self.execute(content_sql,[]) content_sql_arr = self.fetchall() re_main['content'] = content_sql_arr[0]['weight'] if content_sql_arr[0]['weight'] else 0 self.execute(hot_sql,[]) hot_sql_arr = self.fetchall() re_main['hot'] = hot_sql_arr[0]['weight'] if hot_sql_arr[0]['weight'] else 0 self.execute(new_sql,[]) new_sql_arr = self.fetchall() re_main['new'] = new_sql_arr[0]['weight'] if new_sql_arr[0]['weight'] else 0 self.execute(search_sql,[]) search_sql_arr = self.fetchall() re_main['search'] = search_sql_arr[0]['weight'] if search_sql_arr[0]['weight'] else 0 re_main = sorted(re_main.items(), key=lambda pair: pair[1], reverse=True) except: re_main = [] return re_main # 取得A曾經評價不喜歡的房子 def get_bad_houses(self): re_main = [] user_sql = """ SELECT `main_id` FROM `ex_score` WHERE `user_id` = %s AND `score` = 1 """ try: self.execute(user_sql,[self.user_id]) user_main_arr = self.fetchall() re_main = [x['main_id'] for x in user_main_arr] except: re_main = [] return re_main # 取得A曾經搜尋過的條件 def get_user_all_record(self,date_time=0): user_record = [] time_str = '' if self.user_id != '': try: where = [self.user_id] if date_time == 1: time_sql= "SELECT `last_time` "+\ "FROM `ex_record` "+\ "WHERE `user_id`= %s "+\ "ORDER BY `last_time` DESC,`times` DESC LIMIT 1" self.execute(time_sql,[self.user_id]) time_arr = self.fetchall() time_str = time_arr[0]['last_time'] record_sql = """ SELECT 
DISTINCT `area` FROM `ex_record` WHERE `user_id` = %s AND """ record_sql += " `last_time` = '"+str(time_str)+"'" if str(time_str) != '' else " `last_time` BETWEEN (NOW() - INTERVAL "+str(setting.search_house_days)+" DAY) AND NOW()" self.execute(record_sql,where) area_arr = self.fetchall() area_arr = [x['area'] for x in area_arr] record_sql = """ SELECT DISTINCT `price` FROM `ex_record` WHERE `user_id` = %s AND """ record_sql += " `last_time` = '"+str(time_str)+"'" if str(time_str) != '' else " `last_time` BETWEEN (NOW() - INTERVAL "+str(setting.search_house_days)+" DAY) AND NOW()" self.execute(record_sql,where) price_arr = self.fetchall() price_arr = [x['price'] for x in price_arr] record_sql = """ SELECT DISTINCT `ping` FROM `ex_record` WHERE `user_id` = %s AND """ record_sql += " `last_time` = '"+str(time_str)+"'" if str(time_str) != '' else " `last_time` BETWEEN (NOW() - INTERVAL "+str(setting.search_house_days)+" DAY) AND NOW()" self.execute(record_sql,where) ping_arr = self.fetchall() ping_arr = [x['ping'] for x in ping_arr] record_sql = """ SELECT DISTINCT `style` FROM `ex_record` WHERE `user_id` = %s AND """ record_sql += " `last_time` = '"+str(time_str)+"'" if str(time_str) != '' else " `last_time` BETWEEN (NOW() - INTERVAL "+str(setting.search_house_days)+" DAY) AND NOW()" self.execute(record_sql,where) style_arr = self.fetchall() style_arr = [x['style'] for x in style_arr] record_sql = """ SELECT DISTINCT `type` FROM `ex_record` WHERE `user_id` = %s AND """ record_sql += " `last_time` = '"+str(time_str)+"'" if str(time_str) != '' else " `last_time` BETWEEN (NOW() - INTERVAL "+str(setting.search_house_days)+" DAY) AND NOW()" self.execute(record_sql,where) type_arr = self.fetchall() type_arr = [x['type'] for x in type_arr] user_record = [area_arr,price_arr,ping_arr,style_arr,type_arr] except: user_record = [] return user_record def get_user_all_record_items(self,user_all_record,recommand_items,orderby=''): area_str = ','.join(str(num) for num in user_all_record[0]) if len(user_all_record) > 0 else '' style_str = ','.join(str(num) for num in user_all_record[3]) if len(user_all_record[3]) > 0 else '' type_str = ','.join(str(num) for num in user_all_record[4]) if len(user_all_record[4]) > 0 else '' id_str = ','.join(str(num) for num in recommand_items) if len(recommand_items) > 0 else '' hot_house_sql = "SELECT `id` FROM `ex_main` WHERE `is_closed` = 0 AND " hot_house_sql += '`area` IN ('+area_str+') AND ' if area_str != '' else '' #價格 num = 0 num2 = 0 price_arr = {'300':'0','600':'300','1000':'600','1500':'1000','2000':'1500','2001':''} if len(user_all_record[1]) == 1: if user_all_record[1][0] == '2001': num = user_all_record[1][0] num2 = '5000' else: num = price_arr[str(user_all_record[1][0])] num2 = user_all_record[1][0] else: for x in user_all_record[1]: if num == 0 and num2 == 0: num = x num2 = x elif x > num2: num2 = x else: num = x hot_house_sql += '`price` BETWEEN '+str(num)+' AND '+str(num2)+' AND ' if len(user_all_record[1]) > 0 else '' #坪數 num = 0 num2 = 0 ping_arr = {'20':'0','30':'20','40':'30','50':'40','51':''} if len(user_all_record[2]) == 1: if user_all_record[2][0] == '51': num = user_all_record[2][0] num2 = '100' else: num = ping_arr[str(user_all_record[2][0])] num2 = user_all_record[2][0] else: for x in user_all_record[2]: if num == 0 and num2 == 0: num = x num2 = x elif x > num2: num2 = x else: num = x hot_house_sql += '`ping` BETWEEN '+str(num)+' AND '+str(num2)+' AND ' if len(user_all_record[2]) > 0 else '' hot_house_sql += '`room` IN ('+style_str+') AND ' if 
style_str != '' else '' hot_house_sql += '`type` IN ('+type_str+') AND ' if type_str != '' else '' hot_house_sql += '`id` IN ('+id_str+') AND ' if id_str != '' else '' hot_house_sql = hot_house_sql.rstrip('AND ') if orderby != '': hot_house_sql += orderby #print(hot_house_sql) self.execute(hot_house_sql,[]) result = self.fetchall() result = [x['id'] for x in result] return result # 取得user的搜尋紀錄(喜歡) def get_this_user_search(self): user_record = {} user_record['last_record'] = [] user_record['often_record'] = [] if self.user_id != '': user_sql = """ SELECT `area`,`price`,`ping`,`style`,`type` FROM `ex_record` WHERE `user_id` = %s AND `last_time` BETWEEN (NOW() - INTERVAL %s DAY) AND NOW() """ try: # 取得user [最後]搜尋的條件 self.execute(user_sql+" ORDER BY `last_time` DESC,times DESC LIMIT 1",\ [self.user_id,setting.search_house_days]) user_last_arr = self.fetchall() if(user_last_arr is not None): for x, last in enumerate(user_last_arr): user_record['last_record'].append([last['area'],last['price'],last['ping'],last['style'],last['type']]) # 取得user [經常]搜尋的條件 self.execute(user_sql+" GROUP by `area`,`price`,`ping`,`style`,`type` \ ORDER BY `times` DESC,`last_time` DESC,`price`,`ping` DESC LIMIT 3",\ [self.user_id,setting.search_house_days]) user_often_arr = self.fetchall() #print('user_often_arr',user_often_arr) if user_often_arr is not None: for x, often in enumerate(user_often_arr): user_record['often_record'].append([often['area'],often['price'],often['ping'],often['style'],often['type']]) except: user_record = {} user_record['last_record'] = [] user_record['often_record'] = [] return user_record # 取得user的搜尋紀錄(不喜歡) def get_this_user_no_search(self): user_recommend = [] if self.user_id != '': # 取得user半年內的搜尋紀錄 user_today_sql = """ SELECT `area`,`price`,`ping`,`style`,`type` FROM `ex_user_record_view_not` WHERE `user_id` = %s AND `last_time` BETWEEN (NOW() - INTERVAL %s DAY) AND NOW() GROUP BY `area`,`price`,`ping`,`style`,`type` ORDER BY `last_time` DESC """ try: self.execute(user_today_sql,[self.user_id,setting.search_house_days]) user_today_arr = self.fetchall() users = [] if len(user_today_arr) > 0: for x, user_today in enumerate(user_today_arr): record = [user_today['area'],user_today['price'],user_today['ping'],user_today['style'],user_today['type']] # 取得非user有一樣不喜歡記錄的人 users.extend(val['user_id'] for y,val in enumerate(self.get_same_record(self.user_id,record,1,1))) users = list(set(users)) if len(users) > 0: for unid in users: # 取得非user喜歡的房子 times_range_items = self.get_times_range_items(unid,record) if times_range_items: user_recommend.extend(times_range_items) except: user_recommend = [] return list(set(user_recommend)) # 依內容比對(喜歡)(排除評價不好的) def get_this_user_content(self,users_items): user_recommend = [] users_items = ','.join(str(num) for num in users_items) if users_items != '' else '0' if self.user_id != '': # 取得ex_user_record_view半年內的搜尋紀錄 user_today_sql = """ SELECT `area`,`price`,`ping`,`style`,`type`,`main_id` FROM `ex_user_record_view` WHERE `user_id` = %s AND `last_time` BETWEEN (NOW() - INTERVAL %s DAY) AND NOW() """ user_today_sql += ' AND `main_id` IN ('+users_items+')'+\ ' ORDER BY `last_time` DESC' try: self.execute(user_today_sql,[self.user_id,setting.search_house_days]) user_today_arr = self.fetchall() user_items_arr = self.get_stay_items(user_today_arr) if len(user_today_arr) > 0: user_today_sql = 'SELECT ' if len(user_items_arr.keys()) > 0: for x in user_items_arr.keys(): user_today_sql += '`'+x+'`,' user_today_sql = user_today_sql.rstrip(',') + ' FROM `ex_main` WHERE `id` = 
%s' #print('user_today_sql',user_today_sql) for x, user_today in enumerate(user_today_arr): self.execute(user_today_sql,[user_today['main_id']]) this_user_mains = self.fetchall() for x,val in enumerate(this_user_mains): new_row = list(val) #print(new_row) for i in new_row: if i == 'description': #主建物坪數 user_items_arr[i].append("") elif val[i] is not None: user_items_arr[i].append(val[i]) #print(user_items_arr) new_val = {} if len(user_items_arr['area']) > 0: for item_type,record_items in user_items_arr.items(): if item_type == 'area' and len(record_items) == 0: continue elif item_type in setting.out_items: continue new_val[item_type] = [] # 比對是否有一樣的 if item_type in setting.similar_list and len(record_items) > 0: chk = {} item_len = len(record_items) #print(record_items) # 計算項目的次數 for x in record_items: if chk.get(x): chk[x] += 1 else: chk[x] = 1 # 依設定similar_percent列入該項目是否為喜歡 suggestions = sorted(chk.items(), key=lambda pair: pair[1], reverse=True) #print(suggestions) suggestion = [x[0] for x in suggestions if int(x[1]) >= int(item_len / len(suggestions))] #print(suggestions) new_val[item_type].extend(suggestion) # 比對是否在範圍內 elif item_type in setting.range_list and len(record_items) > 0: values = list(map(lambda x: float(x), record_items)) # 計算平均值(this user) mean_num_user = np.mean(values) # 價格 if item_type == 'price': if mean_num_user <= 300: user_val = [0,300] elif 300 < mean_num_user <= 600: user_val = [300,600] elif 600 < mean_num_user <= 1000: user_val = [600,1000] elif 1000 < mean_num_user <= 1500: user_val = [1000,1500] elif 1500 < mean_num_user <= 2000: user_val = [1500,2000] elif mean_num_user > 2000: user_val = [2000,5000] new_val[item_type].extend(user_val) elif item_type == 'ping': #坪數 if mean_num_user <= 20: user_val = [0,20] elif 20 < mean_num_user <= 30: user_val = [20,30] elif 30 < mean_num_user <= 40: user_val = [30,40] elif 40 < mean_num_user <= 50: user_val = [40,50] elif mean_num_user > 50: user_val = [51,100] new_val[item_type].extend(user_val) else: # 計算標準差 std_num = np.std(record_items) # 如果只有一筆資料 if std_num == 0: std_num = int(record_items[0] * setting.range_percent) # 計算範圍值 star_num = int(mean_num_user - std_num) end_num = int(mean_num_user + std_num) new_val[item_type].extend([star_num,end_num]) #print(new_val) try: # 依照內容,找到喜愛的物件 user_record_arr = self.get_stay_items_houses(new_val,users_items) user_recommend = [int(x['id']) for x in user_record_arr] except: user_record_arr = {} except: user_recommend = [] return list(set(user_recommend)) # 取得User在意項目 def get_stay_items(self,user_today_arr): # ex_record_items_stay 半年內的搜尋紀錄 user_stay_sql = """ SELECT `type_key`,`type_value` FROM `ex_record_items` items,`ex_record_items_stay` stay WHERE `items`.`id` = `stay`.`record_items_id` and `items`.`user_id` = %s AND `stay`.`type_key` != 'stay_time' AND `items`.`last_time` BETWEEN (NOW() - INTERVAL %s DAY) AND NOW() """ try: self.execute(user_stay_sql,[self.user_id,setting.search_house_days]) user_stay_arr = self.fetchall() stay_count_arr = {} stay_count = 0 for _,x in enumerate(user_stay_arr): stay_count += x['type_value'] if stay_count_arr.get(x['type_key']): stay_count_arr[x['type_key']] += x['type_value'] else: stay_count_arr[x['type_key']] = x['type_value'] stay_count_arr = sorted(stay_count_arr.items(), key=lambda pair: pair[1], reverse=True) user_items_arr = {} # 如果在意項目大於0 和 大於瀏覽房子數,才列入他有此習慣 if len(stay_count_arr) > 0 and stay_count >= len(user_today_arr): # 計算平均數 stay_avg = math.ceil(stay_count/len(stay_count_arr)) # 取得在意項目 stay_count_arr = [x[0] for _,x in 
enumerate(stay_count_arr) if x[1] >= stay_avg] # 加上基本搜尋項目 stay_count_arr = set(stay_count_arr+setting.basic_list) for x in stay_count_arr: user_items_arr[x] = [] else: ''' user_items_arr = { 'area':[],'road':[],'room':[], 'ping':[],'parking':[], 'type':[],'direction':[], 'price':[],'status':[] }''' user_items_arr = { 'area':[],'room':[],'ping':[], 'type':[],'price':[] } except: user_items_arr = [] return user_items_arr # 取得在意項目的搜尋結果(獲得房屋ID) def get_stay_items_houses(self,new_val,users_items): where_sql = '' length = 1 for x,items in new_val.items(): if x in setting.similar_list: where_sql += ' `'+x+'` IN ('+','.join(str(e) for e in items)+') ' elif x in setting.range_list: where_sql += ' `'+x+'` BETWEEN '+str(items[0])+' AND '+str(items[1]) where_sql += ' AND ' if length < len(new_val) else '' length += 1 # 尋找相似的房子 user_record_sql = 'SELECT `id` '+\ 'FROM `ex_main` '+\ 'WHERE '+where_sql+' AND `id` NOT IN ('+(self.bad_houses+','+users_items if self.bad_houses != '' else users_items)+')'+\ ' AND `is_closed` = 0' self.execute(user_record_sql,[]) user_record_arr = self.fetchall() return user_record_arr # 取得非user有相同記錄的人 def get_same_record(self,user_id,record,limit=1,notlike = 0): record_arr = {} db_str = '`ex_user_record_view_not`' if notlike == 1 else '`ex_record`' # 取得user record record_sql = "SELECT `user_id` FROM "+ db_str+ " WHERE `user_id` != %s AND "+\ "`area` = %s AND "+\ "`price` = %s AND "+\ "`ping` = %s AND "+\ "`style` = %s AND "+\ "`type` = %s" record_vals = [ user_id, record[0],record[1],record[2], record[3],record[4] ] try: if(record): self.execute(record_sql,record_vals) record_arr = self.fetchall() else: # 若setting.search_house_days設定的天數找不到資料,則往回推天數,直到找到資料 limit += 1 login_last_day = self.get_user_login_time() if login_last_day > setting.search_house_days: limit_round = int(login_last_day / setting.search_house_days) if limit <= limit_round: self.get_same_record(user_id,record,limit) except: record_arr = {} return record_arr # 取得某位User瀏覽物件的資料(排除評價不好的) def get_times_range_items(self,user_id,record): record_arr = {} is_favorite_items = [] new_arr = { 'user_id':[], 'record_times':[], 'main_id':[], 'items_times':[], 'click_map':[], 'add_favorite':[], 'item_stay_time':[] } record_sql = """ SELECT `user_id`,`record_times`,`main_id`,`items_times`,`click_map`, `add_favorite`,`item_stay_time` FROM `ex_user_record_view` WHERE `user_id` = %s """ record_sql += (' AND `main_id` NOT IN ('+self.bad_houses+')' if self.bad_houses != '' else '') +\ ' ORDER BY `item_stay_time`' record_vals = [user_id] try: if record: #print(record_sql,record_vals) self.execute(record_sql,record_vals) record_arr = self.fetchall() if record_arr: for _, record in enumerate(record_arr): for key,val in record.items(): new_arr[key].append(val) # 如果User有加入最愛的習慣1;反之0 is_like = 1 if sum(new_arr['add_favorite']) > 0 else 0 user_time_range = 0 if is_like == 0: # 刪除[物件停留時間]離群值 is_outlier = True outlier_index = self.get_outlier(new_arr,'item_stay_time') while is_outlier: if outlier_index[1] is not None: del record_arr[outlier_index[1]] for x in new_arr: del new_arr[x][outlier_index[1]] is_outlier = False else: is_outlier = False is_favorite_items = [] if new_arr['item_stay_time']: # 取得該User瀏覽區間 user_time_range = self.get_user_time_range(list(set(new_arr['item_stay_time']))) # 取得該範圍內,User的物件 is_favorite_items = self.get_is_favorite(new_arr,['main_id','add_favorite','item_stay_time'],user_time_range,is_like) except: is_favorite_items = [] return is_favorite_items # 
取得分類最熱門的房子(no_data=0,有1筆記錄;no_data=1:有1筆記錄,但資料不足;no_data=:完全沒資料) def get_hot_house(self,record,no_data=0,user_id='',items=[]): hot_house_vals = [] hot_house_sql = """ SELECT `id` FROM `ex_main` """ if no_data == 1: hot_house_sql += """ WHERE `area` = %s AND `price`= %s AND `is_closed` = 0 ORDER BY `view_num` DESC,`update_time` DESC LIMIT 5 """ hot_house_vals = [record[0],record[3]] elif no_data == 2: user_area_sql = "SELECT `area_id` FROM `ex_user` WHERE unid = %s" self.execute(user_area_sql,[user_id]) user_area = self.fetchall() hot_house_vals = '' if user_area: hot_house_vals = [user_area[0]['area_id']] hot_house_sql += " WHERE `area` = %s AND " else: hot_house_vals = [setting.default_area] if hot_house_vals: hot_house_sql += " WHERE `area` = %s AND " else: hot_house_vals = [] for num in range(276,318) : hot_house_vals.append(num) hot_house_sql += " WHERE `area` IN ("+ ','.join((str(num) for num in hot_house_vals)) +") AND " hot_house_vals = [] hot_house_sql += """ `is_closed` = 0 ORDER BY `view_num` DESC,`update_time` DESC LIMIT 5 """ else: hot_house_sql += " WHERE " if len(items) > 0: hot_house_sql += " `id` NOT IN ("+','.join(str(e) for e in items)+") AND " # 價格 if record[1] == 300: hot_house_sql += " `price`<= 300 AND " elif record[1] == 600: hot_house_sql += " `price` BETWEEN 300 AND 600 AND " elif record[1] == 1000: hot_house_sql += " `price` BETWEEN 600 AND 1000 AND " elif record[1] == 1500: hot_house_sql += " `price` BETWEEN 1000 AND 1500 AND " elif record[1] == 2000: hot_house_sql += " `price` BETWEEN 1500 AND 2000 AND " elif record[1] == 2001: hot_house_sql += " `price` >= 2000 AND " #坪數 if record[2] == 20: hot_house_sql += " `ping`<= 20 AND " elif record[2] == 30: hot_house_sql += " `ping` BETWEEN 20 AND 30 AND " elif record[2] == 40: hot_house_sql += " `ping` BETWEEN 30 AND 40 AND " elif record[2] == 50: hot_house_sql += " `ping` BETWEEN 40 AND 50 AND " elif record[2] == 51: hot_house_sql += " `ping` >= 51 AND " hot_house_sql += """ `area` = %s AND `room`= %s AND `type` = %s AND `is_closed` = 0 ORDER BY `view_num` DESC,`update_time` DESC LIMIT 5 """ hot_house_vals = [record[0],record[3],record[4]] try: #print(hot_house_sql,hot_house_vals) if hot_house_vals: self.execute(hot_house_sql,hot_house_vals) else: self.execute(hot_house_sql) hot_house = self.fetchall() except: hot_house = [] return hot_house # 取得分位數 def quantile(self,data,percent): # 排除重複 new_data = set(data) p_index = int(percent * len(new_data)) return sorted(new_data)[p_index] # 某user喜歡物件的時間圓餅圖 def plt_pie(self,data): explode = [] df = pd.DataFrame(data) labels = df['main_id'] sizes = df['item_stay_time'] median_num = round(len(data['item_stay_time'])) for zero,_ in enumerate(data['item_stay_time']): explode.append(0.1 if (zero + 1) == median_num else 0) fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True) ax1.axis('equal') plt.show() # 取得離群值的資料index def get_outlier(self,data,field): del_index = None df = pd.DataFrame(data, columns=[field]) df_NaN = df[df.isnull().any(axis=1)].index.values # 刪除 NaN 值 df.dropna(inplace=True) if not df.empty: # abs:絕對值; abs(X - 平均值);mean:取均值 x_mean = abs(df[field] - df[field].mean()) # std:標準差,有 95% 信心估計母群體平均數,在樣本平均數 ± 1.96 * (母群體標準差 / 樣本數 n 的平方根) 的範圍內。 std_196 = 1.96*df[field].std() df['outlier'] = x_mean > std_196 # 刪除為True的資料 for x in range(len(df)): this_bool = df.iloc[x]['outlier'] del_index = x if this_bool else None return [df_NaN,del_index] # 取得該User瀏覽時間的中位數 def get_user_time_range(self,data): time_range = 
self.quantile(data,0.5) if data else 0 return time_range # 取得該時間範圍,User有加入最愛的物件 def get_is_favorite(self,data,columns_list,time_range,is_like): items_arr = [] df = pd.DataFrame(data, columns=columns_list) df = df[df['item_stay_time'] >= time_range] # 如果有加入最愛,則把該物件加入list if is_like == 1: df_favorite = df[df.add_favorite == 1] if not df_favorite.empty: for x in df_favorite.index: items_arr.append(df['main_id'][x]) else: items_arr = df.main_id elif is_like == 0: for x in data: if x == 'main_id': items_arr = [y for y in data[x]] return list(set(items_arr)) # 餘弦相似 def cosine_similarity(self,v, w): re_val = 0 # math.sqrt:平方根 sqrt_dot = math.sqrt(self.dot(v, v) * self.dot(w, w)) if sqrt_dot > 0: re_val = self.dot(v, w) / math.sqrt(self.dot(v, v) * self.dot(w, w)) return re_val # 點乘積(內積) def dot(self,v, w): """v_1 * w_1 + ... + v_n * w_n""" return sum(v_i * w_i for v_i, w_i in zip(v, w)) # 找出與某物件最類似的 def most_similar_items_to(self,items_id,similarities,unique_items): pairs = [(unique_items[other_items_id], similarity) for other_items_id, similarity in enumerate(similarities) if items_id != other_items_id and similarity > 0] return sorted(pairs, key=lambda pair: pair[1], reverse=True) def most_similar_users_to(self,user_id,user_similarities): pairs = [(other_user_id, similarity) # find other for other_user_id, similarity in # users with enumerate(user_similarities[user_id]) # nonzero if user_id != other_user_id and similarity > 0] # similarity return sorted(pairs, # sort them key=lambda pair: pair[1], # most similar reverse=True) # first # 基於項目推薦給User,大於0.5才推薦 def item_based_to_user(self,user_id,user_items_vector,similarities,unique_items,users_items, include_current_items=False): # 把相似的物件累加起來 suggestions = defaultdict(float) if len(user_items_vector) > 0: for item_id, is_like in enumerate(user_items_vector[user_id]): #print('item_id',user_items_vector[user_id]) if is_like == 1 and len(similarities) > 0: similar_likes = self.most_similar_items_to(item_id,similarities[user_id],unique_items) #print('similarities[user_id]',similarities[user_id]) #print('users_items[user_id]',users_items[user_id]) for item, similarity in similar_likes: #if(similarity < 1.0 and item not in users_items[user_id]): suggestions[item] += similarity # 依據權重進行排序 suggestions = sorted(suggestions.items(), key=lambda pair: pair[1], reverse=True) if include_current_items: return suggestions else: return [suggestion for suggestion, weight in suggestions if suggestion not in users_items[user_id] and float(weight) >= setting.similar_percent] # 基於User,大於0.5才推薦 def user_based_suggestions(self,user_id, user_similarities,users_items,include_current_interests=False): # 把相似的物件累加起來 suggestions = defaultdict(float) for other_user_id, similarity in self.most_similar_users_to(user_id,user_similarities): for interest in users_items[other_user_id]: suggestions[interest] += similarity # 依據權重進行排序 suggestions = sorted(suggestions.items(), key=lambda pair: pair[1], reverse=True) #sug_len = int(len(suggestions) * setting.similar_percent) #suggestions = suggestions[:sug_len] #if include_current_interests: # return suggestions #else: return [suggestion for suggestion, weight in suggestions if suggestion not in users_items[user_id] and weight > setting.similar_percent] # 取得主建物的值 def get_description(self,description): items_str = str(description) items_str = items_str.split('坪') items_str = items_str[0].split(':') return (items_str[1] if items_str[1] else '') # 檢查是否有已經close的物件,若有則取相似度最高的物件替換 def check_close(self,items): new_val = {} users_items = 
','.join(str(i) for i in items) user_today_sql = "SELECT * FROM `ex_main` WHERE `id` IN "+\ "("+users_items+") AND `is_closed` = 1" try: self.execute(user_today_sql,[]) user_today_arr = self.fetchall() if len(user_today_arr) > 0: # 取得在意項目 user_items_arr = self.get_stay_items(user_today_arr) for x, user_today in enumerate(user_today_arr): for item_type,record_items in user_today.items(): if item_type in setting.out_items: continue # 相似 elif item_type in user_items_arr.keys() and item_type in setting.similar_list: new_val[item_type] = [record_items] # 比對是否在範圍內 elif item_type in user_items_arr.keys() and item_type in setting.range_list: # 價格 if item_type == 'price': if record_items <= 300: user_val = [0,300] elif 300 < record_items <= 600: user_val = [300,600] elif 600 < record_items <= 1000: user_val = [600,1000] elif 1000 < record_items <= 1500: user_val = [1000,1500] elif 1500 < record_items <= 2000: user_val = [1500,2000] elif record_items > 2000: user_val = [2000,5000] new_val[item_type] = user_val elif item_type == 'ping': #坪數 if record_items <= 20: user_val = [0,20] elif 20 < record_items <= 30: user_val = [20,30] elif 30 < record_items <= 40: user_val = [30,40] elif 40 < record_items <= 50: user_val = [40,50] elif record_items > 50: user_val = [51,100] new_val[item_type] = user_val else: std_num = int(record_items * setting.range_percent) # 計算範圍值 star_num = int(record_items - std_num) end_num = int(record_items + std_num) new_val[item_type] = [star_num,end_num] # 移除已售出的id close_id = [x['id'] for x in user_today_arr] for x in close_id: items.remove(x); # 依照新條件,找到喜愛的房屋 user_record_arr = self.get_stay_items_houses(new_val,users_items) new_id = [x['id'] for x in user_record_arr] for x in new_id: items.append(x); except: items = [] return list(set(items))
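# Note on the similarity core above: user_based_suggestions() and
# item_based_to_user() rest on plain cosine similarity over binary
# "liked item" vectors. A standalone sketch of the same math (no database
# connection needed; the vectors are illustrative):
#
#     import math
#     def dot(v, w):
#         return sum(a * b for a, b in zip(v, w))
#     v, w = [1, 0, 1, 1], [1, 1, 0, 1]
#     print(dot(v, w) / math.sqrt(dot(v, v) * dot(w, w)))   # ~0.667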
UTF-8
Python
false
false
42,837
py
60
function.py
39
0.430543
0.415221
0
1,032
39.09593
186
robintema/django-otp
18,141,941,861,855
b8a33c258ba23b7e9e27207f0b065558f1a9e4b9
c9a43f2949def22860d95af52f8f49f780dee2c1
/django_otp/plugins/otp_email/models.py
6de901da120734dc39d64595b1f925621864d60c
[ "BSD-2-Clause" ]
permissive
https://github.com/robintema/django-otp
111bcfb6401740b516a7045e4e614aebb0e33355
9928bc353c8a49787df2d5b7ee751afb9ac2ed6f
refs/heads/master
2021-01-10T04:23:56.981408
2015-11-24T13:29:52
2015-11-24T13:29:52
46,793,279
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from binascii import unhexlify

from six import PY2

from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string

from django_otp.models import Device
from django_otp.oath import totp
from django_otp.util import hex_validator, random_hex

from .conf import settings


def default_key():
    return random_hex(20)


def key_validator(value):
    return hex_validator()(value)


class EmailDevice(Device):
    """
    A :class:`~django_otp.models.Device` that delivers a token to the
    user's registered email address (``user.email``). This is intended for
    demonstration purposes; if you allow users to reset their passwords via
    email, then this provides no security benefits.

    .. attribute:: key

        *CharField*: A hex-encoded secret key of up to 40 bytes.
        (Default: 20 random bytes)
    """
    key = models.CharField(max_length=80,
                           validators=[key_validator],
                           default=default_key,
                           help_text='A hex-encoded secret key of up to 20 bytes.')

    @property
    def bin_key(self):
        if PY2:
            key = self.key.encode()
        else:
            key = self.key

        return unhexlify(key)

    def generate_challenge(self):
        token = totp(self.bin_key)
        body = render_to_string('otp/email/token.txt', {'token': token})

        send_mail(settings.OTP_EMAIL_SUBJECT,
                  body,
                  settings.OTP_EMAIL_SENDER,
                  [self.user.email])

        message = "sent by email"

        return message

    def verify_token(self, token):
        try:
            token = int(token)
        except Exception:
            verified = False
        else:
            verified = any(totp(self.bin_key, drift=drift) == token
                           for drift in [0, -1])

        return verified
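# Usage sketch (hypothetical user object; assumes Django email settings and
# the OTP_EMAIL_* settings are configured):
#
#     device = EmailDevice(user=some_user, name='email')
#     device.save()
#     device.generate_challenge()     # emails a one-time token
#     device.verify_token('123456')   # True iff the token matches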
UTF-8
Python
false
false
1,886
py
3
models.py
3
0.607635
0.600212
0
69
26.333333
89
Rizo-R/Risk
19,301,583,061,867
6ad5168f4a91ccccfc2b8c7d2c50dbf3284e9839
54aaaa788d8c4ecb801a10fe7ba680212f7d815d
/player.py
d50375cd98027acf2103b494ef5a02e2b36d5c10
[]
no_license
https://github.com/Rizo-R/Risk
bc18e11ac10d69b37c6b8d58d63bd0189053eaf3
568d9a93da3444afdfa979af2dfd06e9a8b184fa
refs/heads/master
2023-04-05T08:07:08.753440
2021-04-08T16:34:03
2021-04-08T16:34:03
290,829,609
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# from continent import * from itertools import combinations from color import Color from card import Card, add_card, find_card, remove_card, total_wildcards from troop import Troop class Player(): def __init__(self, color, troops, cards=[]): ''' [color] is a Color object. [troops] is int. [cards] is a list of Card objects. No duplicates, i.e. each player has their own unique color. Note: [troops] refers to new troops that the player gets in the beginning of each turn, as well as the initial troops they have during territory claim in the very beginning of the game.''' self.color = color self.troops = troops self.cards = cards def get_color(self): return self.color def get_troops(self): return self.troops def get_cards(self): return self.cards def set_troops(self, troops): self.troops = troops def add_troops(self, troops): self.troops += troops def subtract_troops(self, troops): self.troops -= troops # Note: [card] must be a Card object. def give_card(self, card): self.cards = add_card(card, self.cards) def take_card(self, territory): '''Returns the card with given [territory] while removing it from the player's hand. [territory] must be a Node.''' card = find_card(territory, self.cards) self.cards = remove_card(territory, self.cards) return card # def string_cards(self): # if len(self.cards) == 0: # return "[]" # res = "[" # for i in range(len(self.cards)-1): # # For some reason, __str__() method in Card class doesn't work # # as intended. Instead, it keeps printing __str__() for Node. # card = self.cards[i] # res += str(self.cards[i]) # res += ", " # res += str(self.cards[-1]) # res += "]" # return res def count_wildcards(self): '''Helper function for combine_cards(). Returns the number wildcards owned by player and a copy of player's hand without the wildcards (Card list). Doesn't change player's hand.''' count = 0 no_wildcards = [] for card in self.cards: if card.get_troop_type() == Troop.WILDCARD: assert card.get_node() == None, "A wildcard with non-empty territory!" count += 1 else: no_wildcards.append(card) assert count <= total_wildcards, "%s Player has too many wildcards!" % self.color.name return count, no_wildcards @staticmethod def count_wildcards_list(card_lst): '''Counts wildcards in [card_lst].''' count = 0 for card in card_lst: if card.get_troop_type() == Troop.WILDCARD: count += 1 return count @staticmethod def two_same(card_lst): ''' Helper function for possible_combos(). Given [card_lst], returns a list of all possible combinations of two cards where the two cards are of the same kind. Could return an empty list. Precondition: [card_lst] has no wildcards. ''' if len(card_lst) < 2: return [] elif len(card_lst) == 2 and card_lst[0].get_troop_type() == card_lst[1].get_troop_type(): return card_lst def possible_combos(self): ''' Helper function for decide(). Finds all possible card combinations available for the player. Returns a possibly empty list of all possibilities of cards (Card list list). Preconditions: there are only two wildcards in the desk, i.e. [num_wildcards] == 2. Author might possibly make the function compatible with more wildcards in deck in the future. ''' res = [] if self.cards == []: return [] wildcards_owned, other_cards = self.count_wildcards() # Initialize a wild card to possibly add to the output. wildcard = Card(Troop.WILDCARD, None) # Player has 2 wildcards. Any other card will make a combo. if wildcards_owned == 2 and len(self.cards) > 2: for card in other_cards: res.append([wildcard, wildcard, card]) # Player at least one wildcard. 
Any 2 cards of either the same or # different types will make a combination. if wildcards_owned >= 1: two_comb = combinations(other_cards, 2) for el in list(two_comb): res.append([wildcard] + list(el)) # Check all 3-card combos without wildcards. three_comb = combinations(other_cards, 3) for comb in list(three_comb): if Card.combine(comb) > -1: res.append(comb) return res def count_bonus(self, card_lst, deploy=False): '''Given a valid card combination, calculates the total bonus given to the player (+2 troops per territory on a card that is owned by the player). If [deploy] is False, it will just count territorial bonus without actually deploying troops. Preconditions: [card_lst] is a valid 3-card combination that can bring bonus troops. No more than 2 wildcards allowed.''' assert len(card_lst) == 3 card_bonus = 0 total_bonus = 0 wildcards = Player.count_wildcards_list(card_lst) troops = set() # Count territory bonus and troop types. for card in card_lst: if card.get_troop_type() != Troop.WILDCARD: troops.add(card.get_troop_type()) # Check for territorial bonus. node = card.get_node() if node.get_owner() == self.color: total_bonus += 2 if deploy: print("2 bonus troops deployed in %s." % node.get_name()) node.add_troops(2) if len(troops) == 3 or (len(troops) == 2 and wildcards == 1) or (wildcards == 2): card_bonus = 10 else: card_bonus = troops.pop().value # # Count card bonus depending on wildcards in [card_lst]. # if wildcards == 1: # if len(troops) == 2: # card_bonus = 10 # else: # card_bonus = troops.pop().value # elif wildcards == 2: # card_bonus = 10 # # No wildcards. # else: # if len(troops) == 3: # card_bonus = 10 # elif len(troops) == 1: # card_bonus = troops.pop().value total_bonus += card_bonus return card_bonus, total_bonus def decide(self): '''Based on player's hand, picks the best possible hand.''' best_hand = [] best_hand_wildcards = 0 max_bonus = 0 card_combos = self.possible_combos() for combo in card_combos: # print("\nBest hand: %s." % str(best_hand)) # print("Best wildcards: %i. Best card bonus: %i. Best total bonus: %i." % # (best_hand_wildcards, self.count_bonus(combo, False)[0], self.count_bonus(combo, False)[1])) # print("\nCurrent combo: %s." % str(combo)) wildcards = Player.count_wildcards_list(combo) card_bonus, total_bonus = self.count_bonus(combo, False) # print("Wildcards: %i. Card bonus: %i. Total bonus: %i." % # (wildcards, card_bonus, total_bonus)) # Pick the highest bonus with least wildcards used. if total_bonus > max_bonus or (total_bonus == max_bonus and wildcards < best_hand_wildcards): best_hand = combo best_hand_wildcards = wildcards max_bonus = total_bonus return list(best_hand) def use_cards(self, card_lst): card_bonus, total_bonus = self.count_bonus(card_lst, True) print("You have %i total troops in bonus." 
% total_bonus) for card in card_lst: _ = self.take_card(card.get_node()) return card_bonus # p1 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.WILDCARD, None), # Card(Troop.INFANTRY, E1) # ]) # p2 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.WILDCARD, None) # ]) # p3 = Player(Color.RED, 0, [ # Card(Troop.INFANTRY, E2), # Card(Troop.INFANTRY, E3), # Card(Troop.INFANTRY, E4), # ]) # p4 = Player(Color.RED, 0, [ # Card(Troop.INFANTRY, E5), # Card(Troop.INFANTRY, E6), # Card(Troop.CAVALRY, E7), # ]) # p5 = Player(Color.RED, 0, [ # Card(Troop.INFANTRY, AF1), # Card(Troop.CAVALRY, AF2), # Card(Troop.ARTILLERY, AF3), # ]) # p6 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.ARTILLERY, AF4), # Card(Troop.INFANTRY, AF5), # ]) # p7 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.ARTILLERY, AS1), # Card(Troop.INFANTRY, AS2), # Card(Troop.INFANTRY, AS3), # Card(Troop.CAVALRY, AS4), # ]) # p8 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.WILDCARD, None), # Card(Troop.ARTILLERY, AS5), # Card(Troop.INFANTRY, AS6), # Card(Troop.CAVALRY, AS7), # Card(Troop.INFANTRY, AS8), # Card(Troop.ARTILLERY, AS9), # ]) # p9 = Player(Color.RED, 0, [ # Card(Troop.INFANTRY, AS10), # Card(Troop.CAVALRY, AS11), # Card(Troop.ARTILLERY, AS12), # Card(Troop.INFANTRY, AU1), # Card(Troop.CAVALRY, AU2), # Card(Troop.ARTILLERY, AU3), # ]) # p10 = Player(Color.RED, 0, [ # Card(Troop.WILDCARD, None), # Card(Troop.ARTILLERY, NA1), # Card(Troop.INFANTRY, NA2), # Card(Troop.INFANTRY, NA3), # Card(Troop.ARTILLERY, NA4), # ]) # p11 = Player(Color.RED, 0, [ # Card(Troop.INFANTRY, NA5), # Card(Troop.INFANTRY, NA6), # Card(Troop.CAVALRY, NA7), # Card(Troop.CAVALRY, NA8), # ])
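# Usage sketch (hypothetical territory nodes; Card/Troop construction
# mirrors the commented-out test players above):
#
#     p = Player(Color.RED, 0, [Card(Troop.WILDCARD, None),
#                               Card(Troop.INFANTRY, node_a),
#                               Card(Troop.CAVALRY, node_b)])
#     best = p.decide()                # best 3-card combination, possibly []
#     if best:
#         card_bonus = p.use_cards(best)   # deploys bonuses, removes the cards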
UTF-8
Python
false
false
10,072
py
12
player.py
11
0.565131
0.553912
0
295
33.142373
112
Mohnish226/Computer-vision-learn
3,049,426,811,537
63ab9387a4ccf06b4b92f5ab1aea56f1832457de
0a082eb6f8350e77a99ebf87cde107edaa0e916e
/Mini Project/Photo restoration/Photo_restoration.py
56a9692ef3a16387593d605f7e61ba1670796c62
[]
no_license
https://github.com/Mohnish226/Computer-vision-learn
7eb752a7cf98d093fdabf95fcfdd4502f0f70eb8
876a64b3ff001d2e881e11ae290266bb70fe9629
refs/heads/master
2021-01-20T06:11:46.735739
2017-07-01T18:50:31
2017-07-01T18:50:31
89,849,285
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jul  1 23:50:23 2017

@author: Mohnish_Devadiga
"""

import cv2
import numpy as np

# Load our damaged photo
image = cv2.imread('images/abraham.jpg')
cv2.imshow('Original Damaged Photo', image)
cv2.waitKey(0)

# Load the photo where we've marked the damaged areas
marked_damages = cv2.imread('images/mask.jpg', 0)
cv2.imshow('Marked Damages', marked_damages)
cv2.waitKey(0)

# Let's make a mask out of our marked image by changing all colors
# that are not white to black
ret, thresh1 = cv2.threshold(marked_damages, 254, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold Binary', thresh1)
cv2.waitKey(0)

# Let's dilate (make thicker) the marks we made,
# since thresholding has narrowed them slightly
kernel = np.ones((7, 7), np.uint8)
mask = cv2.dilate(thresh1, kernel, iterations=1)
cv2.imshow('Dilated Mask', mask)
cv2.imwrite("images/abraham_mask.png", mask)
cv2.waitKey(0)

restored = cv2.inpaint(image, mask, 3, cv2.INPAINT_TELEA)
cv2.imshow('Restored', restored)
cv2.waitKey(0)
cv2.destroyAllWindows()
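# cv2.inpaint also implements the Navier-Stokes variant; a one-line
# alternative worth comparing on the same mask:
#
#     restored_ns = cv2.inpaint(image, mask, 3, cv2.INPAINT_NS)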
UTF-8
Python
false
false
1,078
py
37
Photo_restoration.py
36
0.730056
0.680891
0
41
25.292683
73
TOTOSOLEIL3/Minigames
2,843,268,394,207
3c6ddfc614917ddfdd22d4feaa5a4e3939d0354d
66929f9aed2bc9a9c7bca5879bcaa3073defc02d
/trouve le nombre.py
055aa20980b18a914b674563816aab4039a69ec6
[ "MIT" ]
permissive
https://github.com/TOTOSOLEIL3/Minigames
2dc3e81a1634ed7094b0d516f68619a2e94116b5
45d067069afb0664c36cb2264ba3baffe590914d
refs/heads/main
2023-07-04T16:24:19.417042
2021-08-12T15:11:51
2021-08-12T15:11:51
395,358,986
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import random, os, sys
from pyfade import Colors, Fade
from os import name
from pycenter import center


def clear():
    if name == 'nt':
        os.system('cls')
    else:
        os.system('clear')


def acueille_mode():
    clear()


def main():
    maxn = 100
    n = random.randint(1, maxn)
    Jeux = """
 ▄▄▄▄███▄▄▄▄    ▄█  ███▄▄▄▄    ▄█     ▄██████▄     ▄████████   ▄▄▄▄███▄▄▄▄      ▄████████    ▄████████
▄██▀▀▀███▀▀▀██▄ ███ ███▀▀▀██▄ ███    ███    ███   ███    ███ ▄██▀▀▀███▀▀▀██▄   ███    ███   ███    ███
███   ███   ███ ███▌ ███  ███ ███▌   ███    █▀    ███    ███ ███   ███   ███   ███    █▀    ███    █▀
███   ███   ███ ███▌ ███  ███ ███▌  ▄███          ███    ███ ███   ███   ███  ▄███▄▄▄       ███
███   ███   ███ ███▌ ███  ███ ███▌ ▀▀███ ████▄  ▀███████████ ███   ███   ███ ▀▀███▀▀▀      ▀███████████
███   ███   ███ ███  ███  ███ ███    ███    ███   ███    ███ ███   ███   ███   ███    █▄            ███
███   ███   ███ ███  ███  ███ ███    ███    ███   ███    ███ ███   ███   ███   ███    ███     ▄█    ███
 ▀█   ███   █▀  █▀    ▀█   █▀ █▀     ████████▀    ███    █▀   ▀█   ███   █▀    ██████████   ▄████████▀
"""
    print(Fade.Vertical(Colors.red_to_yellow, center(Jeux)))
    print("Welcome to Mini games")
    print('Pick a number from 1 to %d' % maxn)
    guess = None
    while guess != n:
        guess = int(input('Number: '))
        if guess > n:
            print('Too high')
        if guess < n:
            print('Too low')
    print('Well done, you won!')


main()

liste = ["R", "r"]
listes = ["Q", "q"]
while True:
    rep = input("Do you want to play again? R[eplay] or Q[uit]: \n").lower()
    if rep == "r":
        clear()
        main()
    if rep == "q":
        sys.exit()
UTF-8
Python
false
false
2,795
py
3
trouve le nombre.py
1
0.262073
0.25936
0
56
30.946429
110
MaiYunfei2000/trail-py-practice
9,320,079,064,792
66f5f09717488ab4bf2a1d520b7d58fe4048e2a1
f97c5a565f43a77a2c24fdf47a5cb7ec976f1c9e
/dlfr/ex060103.py
cc97b5fa2f398f62d9c9ecb318a7bbb3ec9bc411
[]
no_license
https://github.com/MaiYunfei2000/trail-py-practice
0dd6feb12c7b83c0569690d0818b195d4f0a603d
56cb96b4633a7e355ea88292efc6488212ffa11a
refs/heads/master
2020-04-22T04:15:20.133349
2020-04-12T07:43:14
2020-04-12T07:43:14
170,116,938
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
##### 6.1 Working with text data
#### 6.1.3 Putting it all together: from raw text to word embeddings

### 1. Download the raw text of the IMDB data

### Listing 6-8  Processing the labels of the raw IMDB data
import os

imdb_dir = 'aclImdb'

# [os.path --- Common pathname manipulations - Python 3.8.2rc1 documentation](https://docs.python.org/zh-cn/3/library/os.path.html?highlight=os%20path%20join#os.path.join)
# But what does this actually mean......?
train_dir = os.path.join(imdb_dir, 'train')

labels = []
texts = []

for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)

    # https://docs.python.org/zh-cn/3/library/os.html?highlight=os%20listdir#os.listdir
    # Returns a list containing the names of all files and directories in
    # path. The list is in arbitrary order and does not include the special
    # entries '.' and '..' even if they exist in the directory.
    # This list is flat (not nested)
    print(os.listdir(dir_name))
    print(os.listdir(dir_name)[0])

    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)

### 2. Tokenizing the data

### Listing 6-9  Tokenizing the text of the raw IMDB data
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
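# How Listing 6-9 typically continues (added for completeness; the file stops
# after the imports, and the cutoff values below are assumptions, not taken
# from this file):
#
#     maxlen = 100          # cut reviews off after 100 words
#     max_words = 10000     # consider only the top 10,000 words
#
#     tokenizer = Tokenizer(num_words=max_words)
#     tokenizer.fit_on_texts(texts)
#     sequences = tokenizer.texts_to_sequences(texts)
#     data = pad_sequences(sequences, maxlen=maxlen)
#     labels = np.asarray(labels)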
UTF-8
Python
false
false
1,443
py
135
ex060103.py
122
0.62033
0.596872
0
43
25.790698
137
rihardsbelinskis/d4_pr3fl0p_b0t_v2
12,893,491,864,361
c5856ffe28cc07ffc4336d993f76a57513622d46
2dd087ef1a9cfacba47f6e427080656e6c31f310
/v01/detectcards.py
4089918e8a8fa09ca2ecd37301a88133a98345ea
[]
no_license
https://github.com/rihardsbelinskis/d4_pr3fl0p_b0t_v2
520445a765921cd53a219d22fd11446d818f07ff
8c3579f666513beb82bbf29fcb53cc249a87de8c
refs/heads/master
2022-04-16T22:27:52.174440
2020-04-18T12:02:28
2020-04-18T12:02:28
254,669,890
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pyautogui
import time

#s=pyautogui.screenshot()
#a = s.getpixel((640, 350, 0))  # get the RGB data of a specific pixel
#s.save(r'screen.png')

# find every place on screen where the image nba.png appears and print
# the coordinates of each match
#i = 0
#for pos in pyautogui.locateAllOnScreen('BOT1table.png'):
#    i = i+1
#print("there are ", i, "tables")

####### DETERMINE HOW MANY TABLES ARE OPEN FROM THE PIXEL BETWEEN TABLES ##
##########################################################################
#pix6 = pyautogui.pixel(640, 350)
#pix4 = pyautogui.pixel(700, 350)
#pix2 = pyautogui.pixel(960, 350)

#if (pix6[0]) == 3 :
#    print("there are 6 tables!")
#elif (pix4[0]) == 3:
#    print('there are 4 tables!')
#elif (pix2[0]) == 3:
#    print('there are 2 tables!')
#else:
#    print('there is 1 table!')
###########################################################################
###########################################################################

############### COMPARE THE CARD SCREENSHOTS AGAINST THE DECK ###############
im = pyautogui.screenshot(region=(1070, 628, 22, 49))
im.save(r'karts1.png')
im = pyautogui.screenshot(region=(1162, 628, 22, 49))
im.save(r'karts2.png')

karts1 = ('karts1.png')
karts2 = ('karts2.png')
deck = ('deck5.png')

card1 = pyautogui.locate(karts1, deck, grayscale=False)
Value1 = card1[0]
Suit1 = card1[1]
#print(Suit1, Value1)

card2 = pyautogui.locate(karts2, deck, grayscale=False)
Value2 = card2[0]
Suit2 = card2[1]
#print(Suit2, Value2)

# determine the value of card 1
if Value1 == 1:
    CardValue1 = "2"
elif Value1 == 71:
    CardValue1 = "3"
elif Value1 == 141:
    CardValue1 = "4"
elif Value1 == 211:
    CardValue1 = "5"
elif Value1 == 281:
    CardValue1 = "6"
elif Value1 == 351:
    CardValue1 = "7"
elif Value1 == 421:
    CardValue1 = "8"
elif Value1 == 491:
    CardValue1 = "9"
elif Value1 == 561:
    CardValue1 = "T"
elif Value1 == 631:
    CardValue1 = "J"
elif Value1 == 701:
    CardValue1 = "Q"
elif Value1 == 771:
    CardValue1 = "K"
elif Value1 == 841:
    CardValue1 = "A"

# determine the suit of card 1
if Suit1 == 1:
    CardSuit1 = "s"
elif Suit1 == 99:
    CardSuit1 = "c"
elif Suit1 == 197:
    CardSuit1 = "d"
elif Suit1 == 295:
    CardSuit1 = "h"

# determine the value of card 2
if Value2 == 1:
    CardValue2 = "2"
elif Value2 == 71:
    CardValue2 = "3"
elif Value2 == 141:
    CardValue2 = "4"
elif Value2 == 211:
    CardValue2 = "5"
elif Value2 == 281:
    CardValue2 = "6"
elif Value2 == 351:
    CardValue2 = "7"
elif Value2 == 421:
    CardValue2 = "8"
elif Value2 == 491:
    CardValue2 = "9"
elif Value2 == 561:
    CardValue2 = "T"
elif Value2 == 631:
    CardValue2 = "J"
elif Value2 == 701:
    CardValue2 = "Q"
elif Value2 == 771:
    CardValue2 = "K"
elif Value2 == 841:
    CardValue2 = "A"

# determine the suit of card 2
if Suit2 == 1:
    CardSuit2 = "s"
elif Suit2 == 99:
    CardSuit2 = "c"
elif Suit2 == 197:
    CardSuit2 = "d"
elif Suit2 == 295:
    CardSuit2 = "h"

#print(f'My first card is {CardValue1}{CardSuit1}')
#print(f'My second card is {CardValue2}{CardSuit2}')

if CardSuit1 == CardSuit2:
    Suit = "s"
else:
    Suit = "o"

print(f'My hand is: {CardValue1}{CardValue2}{Suit}')
################################################################################
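# The two elif chains encode a regular grid in the deck image: card values
# step by 70 px horizontally and suits by 98 px vertically, so the lookup
# can also be done arithmetically (a sketch assuming the same deck layout):
#
#     VALUES = "23456789TJQKA"
#     SUITS = "scdh"
#     def card_name(value_px, suit_px):
#         return VALUES[(value_px - 1) // 70] + SUITS[(suit_px - 1) // 98]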
UTF-8
Python
false
false
3,230
py
17
detectcards.py
15
0.558824
0.471827
0
138
22.398551
81
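The elif ladders in detectcards.py map the match offsets reported by pyautogui.locate to card values and suits; since the value offsets step by 70 and the suit offsets by 98, the same lookup can be done arithmetically. A minimal sketch of that idea (the offset grid is taken from the record above; nothing else is assumed):

# Map deck-image offsets to card value/suit arithmetically instead of elif chains.
VALUES = "23456789TJQKA"   # x offsets 1, 71, 141, ... stepping by 70
SUITS = "scdh"             # y offsets 1, 99, 197, 295 stepping by 98

def card_from_offsets(x, y):
    # (x - 1) // 70 indexes the value, (y - 1) // 98 indexes the suit
    return VALUES[(x - 1) // 70] + SUITS[(y - 1) // 98]

assert card_from_offsets(351, 1) == "7s"
assert card_from_offsets(841, 295) == "Ah"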
tfang2/PythonEssentialTraining
8,306,466,757,261
f4225e7388ee29a3431a4aaf3f908268e38a7967
47cf62a20110780bafbaec86feabb765fc27a6fc
/Section 12/object_data.py
ae67902a51c5cc90a0152cf4301f0d7e6f981b0d
[]
no_license
https://github.com/tfang2/PythonEssentialTraining
006973cb29290292ff20788ce991450d29484487
ac1367cc037fb6a4f1fb40e3c15e697b76b5b0d5
refs/heads/master
2020-12-26T15:41:24.878834
2016-03-02T12:08:57
2016-03-02T12:08:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# classes.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC

class Duck:
    def __init__(self, **kwargs):
        print("The constructor has been called")
        # assume the color assignment is being made via
        # the key 'color'
        # we access the key 'color' if it exists, otherwise
        # we default to using 'white'
        # self._color = kwargs.get('color', 'white')
        # save our properties in a dictionary
        self.variables = kwargs

    def quack(self):
        print('Quaaack!')

    def walk(self):
        print('Walks like a duck.')

    # get a property the proper way
    def get_variable(self, k):
        return self.variables.get(k, None)

    # set a property the proper way
    def set_variable(self, k, v):
        self.variables[k] = v


def main():
    donald = Duck(color='orange')  # the original read 'organge', a typo
    # accessing a property externally - bad practice!
    # print(donald._color)
    # override the property externally - bad practice!
    # donald._color = 'green'
    # accessing a property externally - bad practice!
    # print(donald._color)
    # changing the color the proper way
    # donald.set_color("Red")
    # retrieving the color the proper way
    print(donald.get_variable('color'))
    donald.quack()
    donald.walk()
    donald.set_variable('feet', 3)
    print("Donald has {} feet".format(donald.get_variable('feet')))


if __name__ == "__main__":
    main()
UTF-8
Python
false
false
1,508
py
24
object_data.py
23
0.626658
0.622679
0
51
28.588235
72
gautamvij/Video-Summarization-using-ConvNet
6,940,667,155,442
362950ed5aca7bbd8a13049f8fdeb661b737e4b0
7e821af9fee56ab51cfc4c4013198e0db5c9b121
/code_top_layer/combine.py
811ebf25cc2b016c583c87ba4ed824a6b914f32e
[]
no_license
https://github.com/gautamvij/Video-Summarization-using-ConvNet
b4bf3324c8092d7782cd54d33f6b9a8f9006ad98
5015cb6f4c51f9456d0ea515fee181d3aadbacdc
refs/heads/master
2021-01-10T11:12:42.106588
2016-03-29T18:37:05
2016-03-29T18:37:05
48,489,634
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
import theano.tensor as T
import numpy as np
from theano.tensor.signal import downsample
from collections import Counter
from copy import deepcopy
import cv2, sys
import cPickle
import random
import theano


# attention function receives a list of 10-level segmentations,
# upsampled features from the convnet, and labels
def attention_func(segments, upsampled_features, labels):
    num_components = []
    masked_components = []
    true_distribution = []
    width = [(0, 0), (0, 0)]
    # process each level of segmentation
    for segments_fz in segments:
        # the different components
        comps = np.unique(segments_fz)
        num_components.append(len(comps))
        # process each component
        for val in comps:
            points = []
            temp = []
            a, b = np.where(segments_fz == val)
            for r, c in zip(a, b):
                points.append([[r, c]])
                temp.append(labels[r, c])
            # ground distribution calculation
            k = Counter(temp)
            temp = []
            for a in range(34):
                temp.append(k[a])
            s = sum(temp)
            for a in range(34):
                temp[a] = float(temp[a]) / s
            true_distribution.append(temp)
            # bounding rectangle for the component
            x, y, w, h = cv2.boundingRect(np.asarray(points))
            temp = deepcopy(segments_fz[x:x + w, y:y + h])
            features = deepcopy(upsampled_features[:, x:x + w, y:y + h])
            temp[temp != val] = 0
            temp[temp == val] = 1
            # background subtraction
            masked = np.multiply(temp, features)
            # decide width for padding
            if w % 3 == 0:
                width[0] = (0, 0)
            elif w % 3 == 1:
                width[0] = (1, 1)
            else:
                width[0] = (0, 1)
            if h % 3 == 0:
                width[1] = (0, 0)
            elif h % 3 == 1:
                width[1] = (1, 1)
            else:
                width[1] = (0, 1)
            # padding
            padded = pad(masked, width).eval()
            # elastic max pooling
            object_descriptor = elastic_max_pool(padded).eval()
            # list of masked components
            masked_components.append(object_descriptor)
    # true_distribution is a list of lists with the density of all 34 labels
    # for the components in the image
    return masked_components, true_distribution


def pad(x, width, val=0, batch_ndim=1):
    input_shape = x.shape
    input_ndim = x.ndim
    output_shape = list(input_shape)
    indices = [slice(None) for _ in output_shape]
    if isinstance(width, int):
        widths = [width] * (input_ndim - batch_ndim)
    else:
        widths = width
    for k, w in enumerate(widths):
        try:
            l, r = w
        except TypeError:
            l = r = w
        output_shape[k + batch_ndim] += l + r
        indices[k + batch_ndim] = slice(l, l + input_shape[k + batch_ndim])
    if val:
        out = T.ones(output_shape) * val
    else:
        out = T.zeros(output_shape)
    return T.set_subtensor(out[tuple(indices)], x)


def elastic_max_pool(inp):
    num_feature_maps, w, h = inp.shape
    maxpool_shape = (w / 3, h / 3)
    return downsample.max_pool_2d(inp, maxpool_shape)


args = sys.argv
if len(args) < 5:
    print "Command line args missing : args[1] - segments file , args[2] - scale invariant features file , args[3] - labels file, args[4] - output file"
    sys.exit(-1)

f1 = file(args[1], 'rb')
f2 = file(args[2], 'rb')
f3 = file(args[3], 'rb')
f4 = file(args[4], 'wb')

"""f1=file('train_segments.pkl','rb')
f2=file('train_scale_invariant_features.pkl','rb')
f3=file('train_labels.pkl','rb')
f4=file('train_top_layer.pkl','wb')"""

for i in range(36):
    label_list = cPickle.load(f3)
    descriptors = []
    ground_distribution = []
    for k, labels in enumerate(label_list):
        segments = cPickle.load(f1)
        # NOTE: the original opened f2 (the scale-invariant features file) but
        # never read it, leaving upsampled_features undefined; presumably the
        # per-image features were meant to be loaded here (assumption).
        upsampled_features = cPickle.load(f2)
        temp1, temp2 = attention_func(segments, upsampled_features, labels)
        descriptors = descriptors + temp1
        ground_distribution = ground_distribution + temp2
        # dump the (object descriptors, ground distribution) tuple
        if k != 0 and (k + 1) % 5 == 0:
            cPickle.dump((descriptors, ground_distribution), f4)
            descriptors = []
            ground_distribution = []

f1.close()
f2.close()
f3.close()
f4.close()
UTF-8
Python
false
false
4,264
py
11
combine.py
10
0.57575
0.55652
0
130
31.761538
151
simon-Hero/flask_dz
5,007,931,886,196
335fdb7d7fa2888c70340c10bcee04d792e17878
cb27d3517627f13e995d8dd38c030793c08c0dc2
/dz/v_1_0/index.py
7b421f348b98a9394f4cfdac13bf6574dc587615
[]
no_license
https://github.com/simon-Hero/flask_dz
5df623de654bb07a42c85038a2796d128184a043
f99ec51d790721679d75f25cf33554668be42e3c
refs/heads/master
2020-12-05T19:29:40.088824
2020-02-27T02:55:24
2020-02-27T02:55:24
232,224,456
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from . import api
from flask import current_app, jsonify, session, request
from dz.utils.response_code import RET
from dz.models import Area, Order, House
from dz import redis_store
from config import settings
import json
from datetime import datetime


@api.route("/areas")
def get_area_info():
    """Fetch the list of city districts."""
    # check the cache first
    try:
        resp_json = redis_store.get("area_info")
    except Exception as e:
        current_app.logger.error(e)
    else:
        if resp_json is not None:
            current_app.logger.info("redis have area info")
            return resp_json, 200, {"Content-Type": "application/json"}
    # query the database, convert to JSON, and save the result in redis
    try:
        area_li = Area.query.all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库异常")  # "database error"
    area_dict_li = []
    for area in area_li:
        area_dict_li.append(area.to_dict())
    res_dict = dict(errno=RET.OK, errmsg="OK", data=area_dict_li)
    resp_json = json.dumps(res_dict)
    try:
        redis_store.setex("area_info", settings.AREA_INFO_REDIS_CACHE_EXPIRES, resp_json)
    except Exception as e:
        current_app.logger.error(e)
    return resp_json, 200, {'Content-Type': "application/json"}


@api.route("/check", methods=["GET"])
def check_login():
    """Check the login status."""
    name = session.get("name")
    if name is not None:
        return jsonify(errno=RET.OK, errmsg="true", data={"name": name})
    else:
        return jsonify(errno=RET.SESSIONERR, errmsg="false")


@api.route("/houses", methods=["GET"])
def get_house_list():
    """
    Search page: find the houses matching the given conditions and return them to the front end.
    :return:
    """
    start_date = request.args.get("sd", "")
    end_date = request.args.get("ed", "")
    area_id = request.args.get("aid", "")
    page = request.args.get("p")
    sort_key = request.args.get("sk", "new")  # sort key

    try:
        if start_date:
            start_date = datetime.strptime(start_date, "%Y-%m-%d")
        if end_date:
            end_date = datetime.strptime(end_date, "%Y-%m-%d")
        if start_date and end_date:
            assert start_date <= end_date
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="日期参数有误")  # "invalid date parameters"

    if area_id:
        try:
            Area.query.get(area_id)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.PARAMERR, errmsg="区域参数有误")  # "invalid area parameter"

    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    # try the cached data
    redis_key = "house_%s_%s_%s_%s" % (start_date, end_date, area_id, sort_key)
    try:
        resp_json = redis_store.hget(redis_key, page)
    except Exception as e:
        current_app.logger.error(e)
    else:
        if resp_json:
            return resp_json, 200, {"Content-Type": "application/json"}

    filter_params = []  # query filter conditions (dates first)
    conflict_orders = None  # orders that conflict with the requested dates
    try:
        if start_date and end_date:
            conflict_orders = Order.query.filter(Order.begin_time <= end_date, Order.end_time >= start_date).all()
        elif start_date:
            conflict_orders = Order.query.filter(Order.end_time >= start_date).all()
        elif end_date:
            conflict_orders = Order.query.filter(Order.begin_time <= end_date).all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库异常")

    if conflict_orders:
        # collect the conflicting house ids from the orders
        conflict_house_ids = [order.house_id for order in conflict_orders]
        # add a condition that excludes those houses from the query
        if conflict_house_ids:
            filter_params.append(House.id.notin_(conflict_house_ids))

    if area_id:
        filter_params.append(House.area_id == area_id)

    if sort_key == "booking":
        house_query = House.query.filter(*filter_params).order_by(House.order_count.desc())
    elif sort_key == "price-inc":
        house_query = House.query.filter(*filter_params).order_by(House.price.asc())
    elif sort_key == "price-des":
        house_query = House.query.filter(*filter_params).order_by(House.price.desc())
    else:
        house_query = House.query.filter(*filter_params).order_by(House.create_time.desc())

    try:
        page_obj = house_query.paginate(page=page, per_page=settings.HOUSE_LIST_PAGE_CAPACITY, error_out=False)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库异常")

    house_li = page_obj.items
    houses = []
    for house in house_li:
        houses.append(house.to_basic_dict())
    total_page = page_obj.pages

    data = {
        "total_page": total_page,
        "houses": houses,
        "current_page": page
    }
    resp_dict = dict(errno=RET.OK, errmsg="OK", data=data)
    resp_json = json.dumps(resp_dict)

    if page <= total_page:
        redis_key = "house_%s_%s_%s_%s" % (start_date, end_date, area_id, sort_key)
        try:
            pipeline = redis_store.pipeline()
            pipeline.multi()
            pipeline.hset(redis_key, page, resp_json)
            pipeline.expire(redis_key, settings.HOUES_LIST_PAGE_REDIS_CACHE_EXPIRES)
            pipeline.execute()
        except Exception as e:
            current_app.logger.error(e)

    return resp_json, 200, {"Content-Type": "application/json"}


@api.route("/house/index", methods=["GET"])
def get_house_index():
    """Images for the home-page carousel."""
    try:
        ret = redis_store.get("home_page_data")
    except Exception as e:
        current_app.logger.error(e)
        ret = None
    if ret:
        current_app.logger.info("redis have house_index data")
        return '{"errno":0, "errmsg":"OK", "data":%s}' % str(ret, encoding="utf8"), 200, {"Content-Type": "application/json"}
    try:
        houses = House.query.order_by(House.order_count.desc()).limit(settings.HOME_PAGE_MAX_HOUSES)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="查询数据失败")  # "failed to query data"
    if not houses:
        return jsonify(errno=RET.NODATA, errmsg="无数据")  # "no data"
    houses_list = []
    for house in houses:
        if not house.index_image_url:
            continue
        houses_list.append(house.to_basic_dict())
    json_house = json.dumps(houses_list)
    try:
        redis_store.setex("home_page_data", settings.HOUES_LIST_PAGE_REDIS_CACHE_EXPIRES, json_house)
    except Exception as e:
        current_app.logger.error(e)
    return '{"errno":0, "errmsg":"OK", "data":%s}' % json_house, 200, {"Content-Type": "application/json"}
UTF-8
Python
false
false
6,878
py
28
index.py
25
0.60861
0.605263
0
211
30.14218
125
nbompetsis/AgentNMap
17,884,243,851,333
5ff80e5c10b138ae9652fc6977829ed8c843d24b
9e2fd5f8250d5499e452ea141a5cac078594d150
/agent/agent.py
34b371f282c17504f2cbe4bccc089745503dd567
[]
no_license
https://github.com/nbompetsis/AgentNMap
0c971aa904db810a29a7d180d80774c5d1c0d9ad
6d0265b1557b7e7f2cd4b7a6a041a046a5e5f46f
refs/heads/master
2021-01-10T07:02:15.489949
2015-10-27T20:04:40
2015-10-27T20:04:40
44,936,127
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from structures.NMapJobsStructure import NMapJobsStructure


class AgentNMap:
    """
    This is the AgentNmap class.
    The agent manipulates the nmap call
    """

    def __init__(self, name):
        self.name = name
        # instance attribute instead of the original class-level list,
        # which would have been shared by every AgentNMap instance
        self.jobs = []

    def setJobs(self, jobs):
        for job in jobs:
            self.jobs.append(job)

    def print_agent(self):
        print("Name ", self.name)
        for job in self.jobs:
            job.print_structure()
UTF-8
Python
false
false
544
py
6
agent.py
4
0.496324
0.496324
0
21
22.238095
76
betty29/code-1
8,555,574,900,682
a713617e1ea39610d1cd2eef2cc1cce5fc7eb446
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
/recipes/Python/577414_8_queens_/recipe-577414.py
484be149e406d1ff873010dcb40243943d2125d0
[ "MIT" ]
permissive
https://github.com/betty29/code-1
db56807e19ac9cfe711b41d475a322c168cfdca6
d097ca0ad6a6aee2180d32dce6a3322621f655fd
refs/heads/master
2023-03-14T08:15:47.492844
2021-02-24T15:39:59
2021-02-24T15:39:59
341,878,663
0
0
MIT
false
2021-02-24T15:40:00
2021-02-24T11:31:15
2021-02-24T12:00:22
2021-02-24T15:39:59
8,183
0
0
0
Python
false
false
#!/usr/bin/env python

def back(path):
    if reject(path):
        return None
    if issolution(path):
        print path
    for brother in makebrothers(path):
        if len(path) < 8:
            newpath = back(brother)
            if newpath:
                return newpath
    return None


def issolution(path):
    return len(path) > 7


def makebrothers(path):
    path = path + [0]
    while path[len(path) - 1] < 8:
        yield path
        path[len(path) - 1] = path[len(path) - 1] + 1


def reject(path):
    t = False
    for i in range(len(path)):
        for j in range(i + 1, len(path)):
            if abs((path[j] - path[i]) * 1.0 / (j - i)) == 1 or path[j] == path[i]:
                return True
    return t


back([])
UTF-8
Python
false
false
724
py
8,810
recipe-577414.py
3,493
0.527624
0.51105
0
30
23.133333
72
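The reject predicate in the recipe encodes the standard queen constraints: a shared column (path[j] == path[i]) or a shared diagonal (|Δcolumn / Δrow| == 1). A quick way to sanity-check it, assuming nothing beyond the recipe itself, is to count the permutations of range(8) it accepts, which should give the known 92 solutions:

from itertools import permutations

def reject(path):
    # same predicate as the recipe: shared column or shared diagonal
    for i in range(len(path)):
        for j in range(i + 1, len(path)):
            if abs((path[j] - path[i]) * 1.0 / (j - i)) == 1 or path[j] == path[i]:
                return True
    return False

solutions = [p for p in permutations(range(8)) if not reject(p)]
print(len(solutions))  # 92, the known number of 8-queens solutions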
Fabio4651/TFC
17,617,955,860,919
d26148f94edefbcda96a5e38b221b1fcfed1d4c9
e0be03b1dc323eb9ff4406b457c0f1f394844ad7
/app.py
d6fbf0c8ab8a1cd22e664bf7bf019ccab3ab6884
[]
no_license
https://github.com/Fabio4651/TFC
834dac0a681e69b00295877340d2129435968478
993666972ba383f9ca462799632b178047a6d4fb
refs/heads/main
2023-07-07T01:50:52.368684
2021-08-07T15:54:08
2021-08-07T15:54:08
393,724,467
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import Flask, render_template, request, session, make_response, redirect, url_for, jsonify
from flask_qrcode import QRcode
from werkzeug.utils import secure_filename
from flask_sqlalchemy import SQLAlchemy
from flask_modals import Modal, render_template_modal
from flask_session import Session
from os.path import join, dirname, realpath
from flask_uploads import IMAGES, UploadSet, configure_uploads
import pandas as pd
from pathlib import Path
import json
import uuid
from datetime import datetime

app = Flask(__name__)
modal = Modal(app)

# The secret key shhh
app.config['SECRET_KEY'] = '_1#y6G"F7Q2z\n\succ/'
app.config['APPLICATION_ROOT'] = "/"
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:admin@localhost/befit'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SESSION_TYPE'] = 'sqlalchemy'
db = SQLAlchemy(app)
app.config['SESSION_SQLALCHEMY'] = db

files = UploadSet('files', IMAGES)
app.config['UPLOADED_FILES_ALLOW'] = set(['png', 'jpg', 'jpeg', 'gif', 'csv'])
app.config['UPLOADED_FILES_DEST'] = 'static/upload'
configure_uploads(app, files)


class Users(db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(25))
    apelido = db.Column(db.String(25))
    data_nascimento = db.Column(db.Date)
    genero = db.Column(db.String(25))
    peso = db.Column(db.Float)
    altura = db.Column(db.Float)
    saude = db.Column(db.Text)
    estilo_vida = db.Column(db.Text)
    telefone = db.Column(db.Integer)
    email = db.Column(db.String(50))
    password = db.Column(db.String(50))
    cargo = db.Column(db.String(45))
    imgpath = db.Column(db.String(80))

    def __init__(self, nome, apelido, data_nascimento, genero, peso, altura, saude, estilo_vida, telefone, email, password, cargo, imgpath):
        self.nome = nome
        self.apelido = apelido
        self.data_nascimento = data_nascimento
        self.genero = genero
        self.peso = peso
        self.altura = altura
        self.saude = saude
        self.estilo_vida = estilo_vida
        self.telefone = telefone
        self.email = email
        self.password = password
        self.cargo = cargo
        self.imgpath = imgpath

    def __repr__(self):
        # the original returned repr(id), the builtin function, not the row id
        return repr(self.id)


class Trainers(db.Model):
    __tablename__ = 'trainers'
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(25))
    apelido = db.Column(db.String(25))
    genero = db.Column(db.String(25))
    telefone = db.Column(db.Integer)
    email = db.Column(db.String(50))
    password = db.Column(db.String(50))
    cargo = db.Column(db.String(45))
    imgpath = db.Column(db.String(80))

    def __init__(self, nome, apelido, genero, telefone, email, password, imgpath, cargo):
        self.nome = nome
        self.apelido = apelido
        self.genero = genero
        self.telefone = telefone
        self.email = email
        self.password = password
        self.imgpath = imgpath
        self.cargo = cargo

    def __repr__(self):
        return repr(self.id)


class Exercicios(db.Model):
    __tablename__ = 'exercicios'
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(25))
    descricao = db.Column(db.Text)
    gifpath = db.Column(db.String(80))

    def __init__(self, nome, descricao, gifpath):
        self.nome = nome
        self.descricao = descricao
        self.gifpath = gifpath

    def __repr__(self):
        return repr(self.id)


class Planos(db.Model):
    __tablename__ = 'planos'
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(25))
    descricao = db.Column(db.String(25))
    exercicios = db.Column(db.String(5000))

    def __init__(self, nome, descricao, exercicios):
        self.nome = nome
        self.descricao = descricao
        self.exercicios = exercicios

    def __repr__(self):
        return repr(self.id)


class Body(db.Model):
    __tablename__ = 'body'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    timestamp = db.Column(db.String(25))
    date = db.Column(db.Date)
    weight = db.Column(db.String(25))
    height = db.Column(db.String(25))
    bmi = db.Column(db.String(25))
    fatRate = db.Column(db.String(25))
    bodyWaterRate = db.Column(db.String(25))
    boneMass = db.Column(db.String(25))
    metabolism = db.Column(db.String(25))
    muscleRate = db.Column(db.String(25))
    visceralFat = db.Column(db.String(25))
    impedance = db.Column(db.String(25))

    def __init__(self, user_id, timestamp, date, weight, height, bmi, fatRate, bodyWaterRate, boneMass, metabolism, muscleRate, visceralFat, impedance):
        self.user_id = user_id
        self.timestamp = timestamp
        self.date = date
        self.weight = weight
        self.height = height
        self.bmi = bmi
        self.fatRate = fatRate
        self.bodyWaterRate = bodyWaterRate
        self.boneMass = boneMass
        self.metabolism = metabolism
        self.muscleRate = muscleRate
        self.visceralFat = visceralFat
        self.impedance = impedance

    def __repr__(self):
        return repr(self.id)


db.create_all()
QRcode(app)


@app.route('/')
def index():
    if 'email' in session:
        total_users = Users.query.filter_by(cargo="Utilizador").count()
        total_trainers = Trainers.query.count()
        total_Exercicios = Exercicios.query.count()
        return render_template('home.html', total_users=total_users, total_trainers=total_trainers, total_Exercicios=total_Exercicios)
    return redirect(url_for('loginPage'))


@app.route('/uploadcsv')
def uploadcsv():
    if 'email' in session:
        return render_template('uploadcsv.html')
    return redirect(url_for('loginPage'))


@app.route('/csv', methods=['POST'])
def csv():
    if 'email' in session:
        body = request.files['body']
        # if request.method == 'POST' and body:
        filename = 'static/upload/' + files.save(request.files['body'])
        print("bruh" + filename)
        # CSV column names
        col_names = ['timestamp', 'weight', 'height', 'bmi', 'fatRate', 'bodyWaterRate', 'boneMass', 'metabolism', 'muscleRate', 'visceralFat', 'impedance']
        # Use Pandas to parse the CSV file
        csvData = pd.read_csv(filename, names=col_names, header=None, skiprows=1)
        csvData = csvData.fillna("Null")
        # Loop through the rows
        for i, row in csvData.iterrows():
            # print(i, row['timestamp'], row['weight'], row['height'], row['bmi'], row['fatRate'], row['bodyWaterRate'], row['boneMass'], row['metabolism'], row['muscleRate'], row['visceralFat'], row['impedance'])
            """df = pd.DataFrame(csvData, columns = ["timestamp", "weight", "height", "bmi", "fatRate", "bodyWaterRate", "boneMass", "metabolism", "muscleRate", "visceralFat", "impedance"])
            print(df.fillna("Null"))"""
            new_csv = Body(user_id=session['id'], timestamp=row['timestamp'], date=datetime.fromtimestamp(row['timestamp']).date(), weight=row['weight'], height=row['height'], bmi=row['bmi'], fatRate=row['fatRate'], bodyWaterRate=row['bodyWaterRate'], boneMass=row['boneMass'], metabolism=row['metabolism'], muscleRate=row['muscleRate'], visceralFat=row['visceralFat'], impedance=row['impedance'])
            db.session.add(new_csv)
            db.session.commit()
        print("============================================================")
        timestamp = 1624789573
        dt_object = datetime.fromtimestamp(timestamp)
        only_date = dt_object.date()
        print("data =", only_date)
        # print("type(dt_object) =", type(dt_object))
        return render_template('uploadcsv.html')
    return redirect(url_for('loginPage'))


@app.route('/consulta')
def consulta():
    if 'email' in session:
        return render_template('consulta.html', list=list)
    return redirect(url_for('loginPage'))


@app.route('/body')
def body():
    if 'email' in session:
        # list = Body.query.all().filter_by(user_id=session['id'])
        list = Body.query.filter_by(user_id=session['id'])
        labels = Body.query.with_entities(Body.date).filter_by(user_id=session['id'])
        values = Body.query.with_entities(Body.weight).filter_by(user_id=session['id'])
        bmi = Body.query.with_entities(Body.bmi).filter_by(user_id=session['id'])
        # labels = ["January", "February", "March", "April", "May", "June", "July", "August"]
        # values = [10, 9, 8, 7, 6, 4, 7, 8]
        return render_template('body.html', list=list, labels=labels, values=values, bmi=bmi)
    return redirect(url_for('loginPage'))


@app.route('/loginPage')
def loginPage():
    return render_template('login.html')


@app.route('/login', methods=['POST'])
def login():
    data = request.form
    email, password = data.get('email'), data.get('password')
    user = Users.query.filter_by(email=email, password=password).first()
    trainer = Trainers.query.filter_by(email=email, password=password).first()
    # print(user.cargo)
    if user is None and trainer is None:
        return render_template('nologin.html')
    """if not user:
        return jsonify({'message' : 'Could not verify user!'})
    """
    print(user)
    if trainer is not None:  # or user==None
        session['id'] = trainer.id
        session['nome'] = trainer.nome
        session['apelido'] = trainer.apelido
        session['email'] = trainer.email
        session['img'] = trainer.imgpath
        session['cargo'] = trainer.cargo
    elif user is not None:
        session['id'] = user.id
        session['nome'] = user.nome
        session['apelido'] = user.apelido
        session['email'] = user.email
        session['img'] = user.imgpath
        session['cargo'] = user.cargo
    return redirect(url_for('index'))


@app.route('/logout')
def logout():
    session.pop('username', None)
    session.clear()
    check = {'check': 'true'}
    return redirect(url_for('loginPage'))


@app.route('/registo')
def registo():
    return render_template('registo.html')


@app.route('/detalhesUser', methods=['POST'])
def detalhesUser():
    if 'email' in session:
        data = request.args.get('id_user')
        if session['cargo'] == "Treinador":
            list = Trainers.query.filter_by(id=data)
        elif session['cargo'] == "Utilizador" or session['cargo'] == "Administrador":
            list = Users.query.filter_by(id=data)
        return render_template('detalhes_user.html', list=list)
    return redirect(url_for('loginPage'))


@app.route('/adduser')
def adduser():
    if 'email' in session:
        u = Users.query.all()
        return render_template('adduser.html')
    return redirect(url_for('loginPage'))


@app.route('/inserirusers', methods=['POST'])
def inserirusers():
    if 'email' in session:
        email = request.form['email']
        password = request.form['password']
        nome = request.form['nome']
        apelido = request.form['apelido']
        telefone = request.form['telefone']
        peso = request.form['peso']
        altura = request.form['altura']
        datadenascimento = request.form['datadenascimento']
        genero = request.form['genero']
        saude = request.form['saude']
        estilovida = request.form['estilovida']
        cargo = request.form['cargo']
        imgPerfil = request.files['imgPerfil']
        filename = 'static/img/user.png'
        if request.method == 'POST' and imgPerfil:
            filename = 'static/upload/' + files.save(request.files['imgPerfil'])
        new_user = Users(email=email, password=password, nome=nome, apelido=apelido, telefone=telefone, peso=peso, altura=altura, data_nascimento=datadenascimento, genero=genero, saude=saude, estilo_vida=estilovida, cargo=cargo, imgpath=filename)
        db.session.add(new_user)
        db.session.commit()
        return redirect(url_for('verusers'))
    return redirect(url_for('loginPage'))


@app.route('/deleteuser', methods=['POST'])
def deleteusers():
    if 'email' in session:
        data = request.args.get('id_user')
        Users.query.filter_by(id=data).delete()
        db.session.commit()
        return redirect(url_for('verusers'))
    return redirect(url_for('loginPage'))


@app.route('/verusers')
def verusers():
    if 'email' in session:
        data = Users.query.all()
        return render_template('verusers.html', data=data)
    return redirect(url_for('loginPage'))


@app.route('/updateusers', methods=['POST'])
def updateusers():
    if 'email' in session:
        data = request.args.get('id_user')
        update = Users.query.filter_by(id=data)
        return render_template('edituser.html', update=update)
    return redirect(url_for('loginPage'))


@app.route('/editusers', methods=['POST'])
def editusers():
    if 'email' in session:
        data = request.form['id']
        update = Users.query.filter_by(id=data).first()
        bd_img = update.imgpath
        imgPerfil = request.files['imgPerfil']
        if request.method == 'POST' and 'email' in request.form:
            update.email = request.form['email']
        if request.method == 'POST' and 'password' in request.form:
            if request.form['password'] == '':
                update.password = update.password
            else:
                update.password = request.form['password']
        if request.method == 'POST' and 'nome' in request.form:
            update.nome = request.form['nome']
        if request.method == 'POST' and 'apelido' in request.form:
            update.apelido = request.form['apelido']
        if request.method == 'POST' and 'telefone' in request.form:
            update.telefone = request.form['telefone']
        if request.method == 'POST' and 'peso' in request.form:
            update.peso = request.form['peso']
        if request.method == 'POST' and 'altura' in request.form:
            update.altura = request.form['altura']
        if request.method == 'POST' and 'cargo' in request.form:
            update.cargo = request.form['cargo']
        if request.method == 'POST' and 'datadenascimento' in request.form:
            update.data_nascimento = request.form['datadenascimento']
        if request.method == 'POST' and 'genero' in request.form:
            update.genero = request.form['genero']
        if request.method == 'POST' and 'saude' in request.form:
            update.saude = request.form['saude']
        if request.method == 'POST' and 'estilovida' in request.form:
            update.estilo_vida = request.form['estilovida']
        update.imgPerfil = bd_img
        if request.method == 'POST' and imgPerfil:
            update.imgpath = 'static/upload/' + files.save(request.files['imgPerfil'])
        db.session.commit()
        return redirect(url_for('verusers'))
    return redirect(url_for('loginPage'))


@app.route('/detalhesExercicio', methods=['POST'])
def detalhesExercicio():
    if 'email' in session:
        data = request.args.get('id_user')
        list = Exercicios.query.filter_by(id=data)
        return render_template('detalhes_exercicio.html', list=list)
    return redirect(url_for('loginPage'))


@app.route('/addexercicio')
def addexercicio():
    if 'email' in session:
        e = Exercicios.query.all()
        return render_template('addexercicio.html')
    return redirect(url_for('loginPage'))


@app.route('/inserirexercicio', methods=['POST'])
def inserirexercicio():
    if 'email' in session:
        nome = request.form['nome']
        descricao = request.form['descricao']
        gif = request.files['gif']
        filename = 'static/img/user.png'
        if request.method == 'POST' and gif:
            filename = 'static/upload/' + files.save(request.files['gif'])
        new_exercicio = Exercicios(nome=nome, descricao=descricao, gifpath=filename)
        db.session.add(new_exercicio)
        db.session.commit()
        return redirect(url_for('verexercicio'))
    return redirect(url_for('loginPage'))


@app.route('/verexercicio')
def verexercicio():
    if 'email' in session:
        data = Exercicios.query.all()
        return render_template('verexercicio.html', data=data)
    return redirect(url_for('loginPage'))


@app.route('/deleteexercicio', methods=['POST'])
def deleteexercicio():
    if 'email' in session:
        data = request.args.get('id_exercicio')
        Exercicios.query.filter_by(id=data).delete()
        db.session.commit()
        return redirect(url_for('verexercicio'))
    return redirect(url_for('loginPage'))


@app.route('/updateexercicio', methods=['POST'])
def updateexercicio():
    if 'email' in session:
        data = request.args.get('id_user')
        update = Exercicios.query.filter_by(id=data)
        return render_template('editexercicio.html', update=update)
    return redirect(url_for('loginPage'))


@app.route('/editexercicio', methods=['POST'])
def editexercicio():
    if 'email' in session:
        data = request.form['id']
        update = Exercicios.query.filter_by(id=data).first()
        bd_gif = update.gifpath
        gif = request.files['gif']
        if request.method == 'POST' and 'nome' in request.form:
            update.nome = request.form['nome']
        if request.method == 'POST' and 'descricao' in request.form:
            update.descricao = request.form['descricao']
        update.gifpath = bd_gif
        if request.method == 'POST' and gif:
            update.gifpath = 'static/upload/' + files.save(request.files['gif'])
        db.session.commit()
        return redirect(url_for('verexercicio'))
    return redirect(url_for('loginPage'))


@app.route('/detalhesTrainer', methods=['POST'])
def detalhesTrainer():
    if 'email' in session:
        data = request.args.get('id_user')
        list = Trainers.query.filter_by(id=data)
        return render_template('detalhes_trainer.html', list=list)
    return redirect(url_for('loginPage'))


@app.route('/addtrainer')
def addtrainer():
    if 'email' in session:
        t = Trainers.query.all()
        return render_template('addtrainer.html')
    return redirect(url_for('loginPage'))


@app.route('/inserirtrainer', methods=['POST'])
def inserirtrainer():
    if 'email' in session:
        email = request.form['email']
        password = request.form['password']
        nome = request.form['nome']
        apelido = request.form['apelido']
        telefone = request.form['telefone']
        genero = request.form['genero']
        imgPerfil = request.files['imgPerfil']
        filename = 'static/img/user.png'
        if request.method == 'POST' and imgPerfil:
            filename = 'static/upload/' + files.save(request.files['imgPerfil'])
        # NOTE: the original omitted cargo, which Trainers.__init__ requires;
        # "Treinador" is an assumed default here.
        new_trainer = Trainers(email=email, password=password, nome=nome, apelido=apelido, telefone=telefone, genero=genero, imgpath=filename, cargo="Treinador")
        db.session.add(new_trainer)
        db.session.commit()
        return redirect(url_for('vertrainer'))
    return redirect(url_for('loginPage'))


@app.route('/vertrainer')
def vertrainer():
    if 'email' in session:
        data = Trainers.query.all()
        return render_template('vertrainer.html', data=data)
    return redirect(url_for('loginPage'))


@app.route('/deletetrainer', methods=['POST'])
def deletetrainer():
    if 'email' in session:
        data = request.args.get('id_trainer')
        Trainers.query.filter_by(id=data).delete()
        db.session.commit()
        return redirect(url_for('vertrainer'))
    return redirect(url_for('loginPage'))


@app.route('/updatetrainer', methods=['POST'])
def updatetrainer():
    if 'email' in session:
        data = request.args.get('id_user')
        update = Trainers.query.filter_by(id=data)
        return render_template('edittrainer.html', update=update)
    return redirect(url_for('loginPage'))


@app.route('/edittrainer', methods=['POST'])
def edittrainer():
    if 'email' in session:
        data = request.form['id']
        update = Trainers.query.filter_by(id=data).first()
        bd_img = update.imgpath
        imgPerfil = request.files['imgPerfil']
        if request.method == 'POST' and 'nome' in request.form:
            update.nome = request.form['nome']
        if request.method == 'POST' and 'apelido' in request.form:
            update.apelido = request.form['apelido']
        if request.method == 'POST' and 'genero' in request.form:
            update.genero = request.form['genero']
        if request.method == 'POST' and 'email' in request.form:
            update.email = request.form['email']
        if request.method == 'POST' and 'telefone' in request.form:
            update.telefone = request.form['telefone']
        if request.method == 'POST' and 'password' in request.form:
            if request.form['password'] == '':
                update.password = update.password
            else:
                update.password = request.form['password']
        update.imgpath = bd_img
        if request.method == 'POST' and imgPerfil:
            update.imgpath = 'static/upload/' + files.save(request.files['imgPerfil'])
        db.session.commit()
        return redirect(url_for('vertrainer'))
    return redirect(url_for('loginPage'))


@app.route('/addplano')
def addplano():
    if 'email' in session:
        data = Exercicios.query.all()
        return render_template('addplano.html', data=data)
    return redirect(url_for('loginPage'))


@app.route('/inserirplano', methods=['POST'])
def inserirplano():
    if 'email' in session:
        nome = request.form['nome']
        descricao = request.form['descricao']
        count = int(request.form['count'])
        if nome is None or descricao is None or nome == '' or descricao == '':
            return {'message': 'please choose a client'}, 400, {'dataType': 'application/json'}
        """try:
            results = (
                db.session.query(Marking, Cars, User)
                .filter(Marking.date==date,
                        Marking.hour==hour,
                        Marking.car2id==Cars.id,
                        Marking.client2id==User.id,)
                .first()
            )
        except:
            return {'message' : 'Database Error'}, 400, {'dataType':'application/json'}"""
        """user = [results.User.nome, results.User.email, results.User.telefone, 'data']
        car = [results.Cars.marca, results.Cars.cor, results.Cars.matricula, date]
        """
        teste = 0
        # print(count)
        output = []
        for exercicio, rep, repet in zip(request.form.getlist('exercicio'), request.form.getlist('rep'), request.form.getlist('repet')):
            if not all((exercicio, rep, repet)):
                return {'message': 'List item Empty'}, 400, {'dataType': 'application/json'}
            data = {}
            data['exercicio'] = exercicio
            data['rep'] = rep
            data['repet'] = repet
            output.append(data)
            thisdict = {
                "brand": "Ford",
                "model": "Mustang",
                "year": 1964
            }
        # print(output)
        exercicio2 = "exercicio" + str(teste)
        rep2 = "rep" + str(teste)
        repet2 = "repet" + str(teste)
        for exercicio, rep, repet in zip(request.form.getlist('exercicio' + str(teste)), request.form.getlist('rep' + str(teste)), request.form.getlist('repet' + str(teste))):
            if not all((exercicio, rep, repet)):
                return {'message': 'List item Empty'}, 400, {'dataType': 'application/json'}
            data2 = {}
            data2['exercicio' + str(teste)] = exercicio
            data2['rep' + str(teste)] = rep
            data2['repet' + str(teste)] = repet
            output.append(data2)
            thisdict = {
                "brand": "Ford",
                "model": "Mustang",
                "year": 1964
            }
            # print(str(teste))
            print(output)
            teste = teste + 1
        """novo_plano = Planos(nome=nome, descricao=descricao, exercicios=apelido, telefone=telefone, genero=genero, imgpath=filename)
        db.session.add(new_trainer)
        db.session.commit()"""
        # rendered = render_template('pdftemplate1.html', exercicio=exercicio, rep=rep, soma=repet, plist=output)
        return "hi welcome to chilli's"
    return redirect(url_for('loginPage'))


@app.route('/verplano')
def verplano():
    if 'email' in session:
        return render_template('verplano.html')
    return redirect(url_for('loginPage'))
UTF-8
Python
false
false
24,718
py
16
app.py
2
0.611498
0.606683
0
687
34.957787
417
vigonotion/zha-device-handlers
13,030,930,792,736
f0bfef81f2d08cefec8c2464714c0b67d48a43b6
aeaf548fba8ee9f88cd9254f2bc4ac0a3bbfb207
/zhaquirks/samjin/button.py
a0f21836af935082e302ca0864a4e30dbf00bbe9
[ "Apache-2.0" ]
permissive
https://github.com/vigonotion/zha-device-handlers
6001aa812380a0540d76f68778ebade93f93928d
6d0560655428e1f04626a7722febf492c4174e8b
refs/heads/dev
2020-12-26T12:07:27.192810
2020-01-31T17:57:29
2020-01-31T17:57:29
237,504,327
1
0
Apache-2.0
true
2020-01-31T22:49:11
2020-01-31T19:47:15
2020-01-31T19:47:17
2020-01-31T22:49:10
391
0
0
0
null
false
false
"""Samjin button device.""" import logging from zigpy.profiles import zha from zigpy.quirks import CustomCluster, CustomDevice from zigpy.zcl.clusters.general import ( Basic, Identify, Ota, PollControl, PowerConfiguration, ) from zigpy.zcl.clusters.measurement import TemperatureMeasurement from zigpy.zcl.clusters.security import IasZone from . import CLICK_TYPES, SAMJIN from ..const import ( ARGS, BUTTON, COMMAND, COMMAND_BUTTON_DOUBLE, COMMAND_BUTTON_HOLD, COMMAND_BUTTON_SINGLE, COMMAND_ID, DEVICE_TYPE, DOUBLE_PRESS, ENDPOINTS, INPUT_CLUSTERS, LONG_PRESS, MODELS_INFO, OUTPUT_CLUSTERS, PRESS_TYPE, PROFILE_ID, SHORT_PRESS, ZHA_SEND_EVENT, ) _LOGGER = logging.getLogger(__name__) DIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821 class SamjinButton(CustomDevice): """Samjin button device.""" class IASCluster(CustomCluster, IasZone): """Occupancy cluster.""" cluster_id = IasZone.cluster_id def handle_cluster_request(self, tsn, command_id, args): """Handle a cluster command received on this cluster.""" if command_id == 0: state = args[0] & 3 event_args = { PRESS_TYPE: CLICK_TYPES[state], COMMAND_ID: command_id, ARGS: args, } action = "button_{}".format(CLICK_TYPES[state]) self.listener_event(ZHA_SEND_EVENT, self, action, event_args) signature = { # <SimpleDescriptor endpoint=1 profile=260 device_type=1026 # device_version=0 # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821] # output_clusters=[3, 25]> MODELS_INFO: [(SAMJIN, BUTTON)], ENDPOINTS: { 1: { PROFILE_ID: zha.PROFILE_ID, DEVICE_TYPE: zha.DeviceType.IAS_ZONE, INPUT_CLUSTERS: [ Basic.cluster_id, PowerConfiguration.cluster_id, Identify.cluster_id, PollControl.cluster_id, TemperatureMeasurement.cluster_id, IASCluster.cluster_id, DIAGNOSTICS_CLUSTER_ID, ], OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], } }, } replacement = { ENDPOINTS: { 1: { PROFILE_ID: zha.PROFILE_ID, INPUT_CLUSTERS: [ Basic.cluster_id, PowerConfiguration.cluster_id, Identify.cluster_id, PollControl.cluster_id, TemperatureMeasurement.cluster_id, IASCluster, DIAGNOSTICS_CLUSTER_ID, ], OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], } } } device_automation_triggers = { (DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: COMMAND_BUTTON_DOUBLE}, (SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_BUTTON_SINGLE}, (LONG_PRESS, LONG_PRESS): {COMMAND: COMMAND_BUTTON_HOLD}, }
UTF-8
Python
false
false
3,211
py
23
button.py
17
0.54687
0.53379
0
109
28.458716
77
fangpsh/cdn_support
11,965,778,919,784
b435f771391c36c513ef488d301b700170754c13
d04d37a04ddb7e9680840d17519e4344a284a638
/cdn.py
3296c6be51d85d47bc424b4c888bab7d823a2868
[]
no_license
https://github.com/fangpsh/cdn_support
7f19919406347455b33a55da0c9df5cf0c4ba082
765f9be6075e652fee0fba9a12620dc6b0cb2751
refs/heads/master
2021-06-18T21:42:25.241131
2017-07-10T11:56:35
2017-07-10T11:56:35
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
import sys
from pelican import signals
import os
import re

reload(sys)
sys.setdefaultencoding('utf8')


def replace_link(pelican):
    cdn_type = pelican.settings['CDN_TYPE'] or [".css", ".js", ".jpeg", ".png", ".gif", ".jpg"]
    cdn_domain = pelican.settings['CDN_DOMAIN']
    for root, dirs, files in os.walk(pelican.settings['OUTPUT_PATH']):
        for outfile in files:
            if os.path.splitext(outfile)[1] in [".html", ".htm"]:
                file_path = os.path.join(root, outfile)
                with open(file_path, "r+") as f:
                    src = f.read()
                    for ext in cdn_type:
                        keyword = 'src'
                        if ext == ".css":
                            keyword = 'href'
                        pattern = keyword + '="(' + pelican.settings['SITEURL'] + '/|/)([^:"]*)\\' + ext + '"'
                        repl = keyword + '="//' + cdn_domain + '/' + r'\2' + ext + '"'
                        src = re.sub(pattern, repl, src)
                    f.seek(0)
                    f.write(src)


def register():
    signals.finalized.connect(replace_link)
UTF-8
Python
false
false
1,144
py
3
cdn.py
2
0.473776
0.469406
0
37
29.918919
106
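The plugin's substitution builds a pattern like src="(SITEURL/|/)path.ext" and rewrites the captured path onto the CDN domain. A small standalone check of that rewrite, using made-up values for SITEURL and CDN_DOMAIN:

import re

siteurl = "https://example.com"   # hypothetical SITEURL
cdn_domain = "cdn.example.net"    # hypothetical CDN_DOMAIN
html = '<script src="/assets/app.js"></script>'

ext = ".js"
pattern = 'src="(' + siteurl + '/|/)([^:"]*)\\' + ext + '"'
repl = 'src="//' + cdn_domain + '/' + r'\2' + ext + '"'
print(re.sub(pattern, repl, html))
# <script src="//cdn.example.net/assets/app.js"></script>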
uvamiao/MyLeetcode
8,581,344,675,107
fc9d1c90120d4d913e07852cc5414901f35ce77f
50441d932e8c7ad22cf43d4aa83237d55f10d759
/528 Random Pick with Weight.py
d1f19b65195a8db9c9f52b7b8f9c6d739eac8b74
[]
no_license
https://github.com/uvamiao/MyLeetcode
845839649170094f9e10a0840bbbf04acd13af76
431aa7a2503a1205f9f8241e5de3ba4aeda9f44e
refs/heads/master
2020-03-25T01:33:33.162780
2019-06-10T07:49:25
2019-06-10T07:49:25
143,243,341
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" 528. Random Pick with Weight Given an array w of positive integers, where w[i] describes the weight of index i, write a function pickIndex which randomly picks an index in proportion to its weight. Note: 1 <= w.length <= 10000 1 <= w[i] <= 10^5 pickIndex will be called at most 10000 times. Input: ["Solution","pickIndex","pickIndex","pickIndex","pickIndex","pickIndex"] [[[1,3]],[],[],[],[],[]] Output: [null,0,1,1,1,0] """ class Solution: def __init__(self, w): """ :type w: List[int] """ sm = 0 self.psum = [] for weight in w: sm += weight self.psum.append(sm) def pickIndex(self): """ :rtype: int """ # use binary search to find the first index such that self.psum[index] >= target target = random.randint(1, self.psum[-1]) left, right = 0, len(self.psum) - 1 while left + 1 < right: mid = left + (right - left) // 2 if self.psum[mid] >= target: right = mid else: left = mid if self.psum[left] >= target: return left return right def PickKeys(self, Keys): # use binary search to find the first index such that self.psum[index] >= target val = random.random() # random float num in [0, 1] target = self.psum[-1] * val # min + (max - min) * val left, right = 0, len(self.psum) - 1 while left + 1 < right: mid = left + (right - left) // 2 if self.psum[mid] >= target: right = mid else: left = mid if self.psum[left] >= target: return Keys[left] return Keys[right] # import random # import collections # # w = [1, 2, 3] # obj = Solution(w) # # tmp = [] # for i in range(1000): # tmp.append(obj.pickIndex()) # # freq = collections.Counter(tmp) # print(freq) # # # ## follow up, w is float # # w2 = [1.5, 3.0, 4.5] # Keys = ['a', 'b', 'c'] # obj2 = Solution(w2) # # tmp2 = [] # for i in range(1000): # tmp2.append(obj2.PickKeys(Keys)) # # freq2 = collections.Counter(tmp2) # # print(freq2)
UTF-8
Python
false
false
2,187
py
18
528 Random Pick with Weight.py
17
0.527206
0.497485
0
88
23.784091
168
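The hand-rolled binary search above can be expressed with the standard library: bisect_left on the prefix sums returns the first index whose cumulative weight reaches the target, which is exactly what the loop computes. A minimal equivalent sketch (class and method names here are illustrative, not from the record):

import bisect
import itertools
import random


class WeightedPicker:
    def __init__(self, w):
        # prefix sums of the weights, e.g. [1, 3] -> [1, 4]
        self.psum = list(itertools.accumulate(w))

    def pick_index(self):
        target = random.randint(1, self.psum[-1])
        # first index i with psum[i] >= target
        return bisect.bisect_left(self.psum, target)


picker = WeightedPicker([1, 3])
print(picker.pick_index())  # 0 about 25% of the time, 1 about 75%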
andreynovikov/utilery
11,957,188,986,638
6f1a9b59ecfcc6323f7eefda9cfce397406d0ff1
cb333bc4f2f7a55653ff347d715576f6eba1dca1
/utilery/core.py
f650f3ad02f6e1c17f680398f3bb310ce3a40904
[]
no_license
https://github.com/andreynovikov/utilery
43e7c6fff152ad7eff19276e9e904077f736a8d7
ff9bdcd5a57660e4a1c864b8637b73f205f9a237
refs/heads/master
2017-10-07T14:19:53.382027
2017-02-16T11:00:50
2017-02-16T11:00:50
81,192,435
0
0
null
true
2017-02-07T09:48:33
2017-02-07T09:48:33
2016-10-27T15:04:50
2016-08-27T17:12:46
48
0
0
0
null
null
null
import atexit
import logging
import time

import psycopg2
import psycopg2.extras
import yaml

from pathlib import Path

from . import config
from .plugins import Plugins
from .models import Recipe

logger = logging.getLogger(__name__)
if config.DEBUG:
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())

RECIPES = {}


class DB(object):

    DEFAULT = "default"
    _ = {}

    @classmethod
    def connect(cls, dbname=None):
        dbname = dbname or cls.DEFAULT
        if dbname not in cls._:
            cls._[dbname] = psycopg2.connect(config.DATABASES[dbname])
            cls._[dbname].autocommit = True
        return cls._[dbname]

    @classmethod
    def fetchall(cls, query, args=None, dbname=None):
        before = time.time()
        cur = DB.connect(dbname).cursor(
            cursor_factory=psycopg2.extras.DictCursor)
        cur.execute(query, args)
        rv = cur.fetchall()
        cur.close()
        after = time.time()
        logger.debug('%s => %s\n%s', query, (after - before) * 1000, '*' * 40)
        return rv


def close_connections():
    logger.debug('Closing DB connections')
    for conn in DB._.values():
        conn.close()


atexit.register(close_connections)

Plugins.load()
Plugins.hook('before_load', config=config)


def load_recipe(data):
    name = data.get('name', 'default')
    if name in RECIPES:
        raise ValueError('Recipe with name {} already exist'.format(name))
    data['name'] = name
    RECIPES[name] = Recipe(data)
    if len(RECIPES) == 1 and name != 'default':
        RECIPES['default'] = RECIPES[data['name']]


recipes = config.RECIPES
if isinstance(recipes, str):
    recipes = [recipes]
for recipe in recipes:
    with Path(recipe).open() as f:
        load_recipe(yaml.load(f.read()))

Plugins.hook('load', config=config, recipes=RECIPES)
UTF-8
Python
false
false
1,866
py
24
core.py
12
0.631297
0.625402
0
79
22.620253
78
morani80/collect_med_inst_cd_py
10,093,173,162,978
0c0e103ca9bf17302bee14ba0a627d5be5919875
b509ccd9c72b90bd7fd453d503c3b5a8c8c575e6
/collect_med_inst_cd/inst_cd_crawler.py
c7e9155699650784ad49978bd9070ce87ddd2b27
[]
no_license
https://github.com/morani80/collect_med_inst_cd_py
aae2f7e582a7e915bb1443bfad64fb688e7ef902
93434d55457ddba0898c0f48a8e5a6f51719a45c
refs/heads/master
2022-12-18T12:26:47.947466
2022-09-03T11:44:47
2022-09-03T11:44:47
282,172,065
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging
import os
import re
import shutil
import zipfile
from typing import Callable
from urllib.parse import urljoin

import requests

from .consts import BRANCH_ALL, BRANCH_KYUSYU, BRANCH_LIST
from .excel_parser import ExcelParser
from .output_csv_handler import OutputCsvHandler
from .prefecture_finder import PrefectureFinder
from .web_parser import BranchWebpageParser


class MedInstCdCrawler:
    """
    MedicalInstitutionCdCrawler
    """

    def __init__(self):
        self._logger = logging.getLogger(__name__)
        self._web_parser = BranchWebpageParser()
        self._excel_parser = ExcelParser()
        self._pref_finder = PrefectureFinder()
        self._csv_handler = OutputCsvHandler()
        # The Kyushu zip bundles xlsx files for medical (ika), dental and
        # pharmacy institutions; only the medical ones are targeted.
        # NOTE: the original pattern r'.+_ika_.*[xls|xlsx]$' used a character
        # class where an alternation group was clearly intended.
        self._zipfile_filter = {BRANCH_KYUSYU: re.compile(r'.+_ika_.*\.(xls|xlsx)$')}

    def download_cd_files(self, branch_id: int, dl_dir: str):
        """
        Download files that have med_inst_cds from the branch page
        """
        # download only. no need callback
        self._fetch_med_inst_cd(branch_id, dl_dir, None, '', False)

    def run(self, branch_id: int, out_dir: str = './output'):
        """
        Get med_inst_cd list from the branch page
        """
        output_file = "all_med_inst_cd.csv" if branch_id == BRANCH_ALL else f"{branch_id}_med_inst_cd.csv"
        self._csv_handler.clear_output_file(out_dir, output_file)
        self._fetch_med_inst_cd(branch_id, './tmp_data', self._output_med_list_from_file, out_dir, output_file)

    def _fetch_med_inst_cd(self, branch_id: int, dl_dir: str,
                           output_handler: Callable[[int, int, str, str, str], None],
                           out_dir: str, output_file: str):
        """
        Download files from each kouseikyoku.mhlw.go.jp sites and sum them up to csv.
        """
        if branch_id == BRANCH_ALL:
            for b_id in BRANCH_LIST:
                # call this recursively
                self._fetch_med_inst_cd(b_id, dl_dir, output_handler, out_dir, output_file)
        else:
            self._logger.debug(f"..branch-{branch_id} begin")
            file_urls = self._web_parser.extract_file_urls(branch_id)
            if file_urls:
                for i, f_url in enumerate(file_urls):
                    saved_file = self._download_to(f_url, dl_dir)
                    if saved_file:
                        self._logger.debug(f"downloaded: {saved_file}")
                        if output_handler:
                            output_handler(branch_id, i + 1, saved_file, out_dir, output_file)
                    else:
                        self._logger.warn(f"can not download file: {f_url}")

    def _output_med_list_from_file(self, branch_id: int, p_seq: int, file_path: str, out_dir: str, output_file: str) -> None:
        # the original read r'.+[xls|xlsx]$', a character-class typo
        re_patt_excel = re.compile(r'.+\.(xls|xlsx)$')
        if re.match(r'.+zip$', file_path):
            unzip_dir = self._zip_extract(file_path)
            # specified pattern or excel pattern
            re_patt = self._zipfile_filter[branch_id] if branch_id in self._zipfile_filter else re_patt_excel
            self._output_file_in_dir(branch_id, unzip_dir, re_patt, out_dir, output_file)
        elif re_patt_excel.match(file_path):
            med_l = self._parse_file(branch_id, file_path)
            if med_l:
                self._logger.debug(f"output_csv_append: {file_path}")
                self._csv_handler.output_csv_append(out_dir, output_file, med_l)

    def _output_file_in_dir(self, branch_id: str, dir_path: str, file_filter: re.Pattern, out_dir: str, output_file: str) -> None:
        for f in os.listdir(dir_path):
            f_path = os.path.join(dir_path, f)
            if os.path.isdir(f_path):
                # call this recursively
                self._output_file_in_dir(branch_id, f_path, file_filter, out_dir, output_file)
            elif os.path.isfile(f_path) and file_filter.match(f):
                med_l = self._parse_file(branch_id, f_path)
                if med_l:
                    self._logger.debug(f"output_csv_append: {f_path}")
                    self._csv_handler.output_csv_append(out_dir, output_file, med_l)

    def _parse_file(self, branch_id: int, file_path: str) -> list:
        """
        Parse Excel file and create med_inst_cd list.
        Add 10-digit med_inst_cd to each item of the list.
        """
        # assumes one file per prefecture
        med_l = self._excel_parser.parse(branch_id, file_path)
        if med_l:
            # Find the prefecture-cd using first valid zip_cd, add med_inst_cd(9 digit) with its prefecture-cd.
            # Must be one file for one prefecture.
            pref_cd = ''
            retry = 8
            for med_item in med_l:
                zip_cd = med_item[2]
                if zip_cd:
                    pref_cd = self._pref_finder.find_prefecture_cd(zip_cd)
                    if pref_cd:
                        # Add med_inst_cd(9 digit) at the first column
                        med_l = [[f"{pref_cd:0>2}{item[0]}"] + item for item in med_l]
                        break
                retry -= 1
                if retry <= 0:
                    self._logger.warn(f"can not find the prefecture cd: {file_path}")
                    break
            if not pref_cd:
                med_l = [[""] + item for item in med_l]
        return med_l

    def _download_to(self, url: str, dl_dir: str) -> str:
        if not os.path.exists(dl_dir):
            os.mkdir(dl_dir)
        filename = url.split('/')[-1]
        savepath = os.path.join(dl_dir, filename)
        if os.path.exists(savepath):
            os.remove(savepath)
        res = requests.get(url, stream=True)
        with open(savepath, 'wb') as f:
            for chunk in res.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return savepath

    def _zip_extract(self, zipfile_path: str) -> str:
        # use the zip file's base name as the extraction directory
        unzip_dir = os.path.join(os.path.dirname(zipfile_path), os.path.splitext(os.path.basename(zipfile_path))[0])
        if os.path.exists(unzip_dir):
            shutil.rmtree(unzip_dir)
        with zipfile.ZipFile(zipfile_path) as zfile:
            counter = 1
            for info in zfile.infolist():
                # skip directory within the zipfile
                if not info.filename.endswith(r'/'):
                    if not re.match(r'^[a-zA-Z0-9\.-_]+$', info.filename):
                        # file names may contain full-width symbols or full-width
                        # spaces and are unpredictable, so just replace them
                        # info.filename = info.filename.encode('cp437').decode('cp932')
                        _, ext = os.path.splitext(info.filename)
                        if not ext:
                            # there were cases where the extension could not be
                            # recovered from the garbled file name
                            splited = info.filename.split('.')
                            ext = f".{splited[-1]}"
                        info.filename = f"dl_{counter}{ext}"
                        counter += 1
                    zfile.extract(info, unzip_dir)
        return unzip_dir
UTF-8
Python
false
false
7,258
py
13
inst_cd_crawler.py
11
0.549802
0.545699
0
179
38.486034
153
jiajie999/space-personal
14,199,161,910,455
6d5a76786a3ea6a9a10c8f4916446f7c6e0d5908
d43ac30ac612557cbbdcd8384cf8eca043e1bbfd
/dofunctions.py
ad578336a8b37462400e8d5d5b725443fd3cd0d1
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
https://github.com/jiajie999/space-personal
fc19a5e8e564d78093fbe5e119266b13473b2851
81b2d7ec7a4d720402bfd37f5668e78d00fb1418
refs/heads/master
2021-01-18T08:02:04.120075
2015-02-13T06:57:24
2015-02-13T06:57:24
30,744,087
0
0
null
true
2015-02-13T06:45:00
2015-02-13T06:44:59
2015-02-13T04:43:24
2015-02-13T04:43:24
2,112
0
0
0
null
null
null
import digitalocean
import data
from log import *


def get_token():
    config = data.get_config()
    return config['do_api_key']


def get_manager():
    token = get_token()
    try:
        manager = digitalocean.Manager(token=token)
    except Exception as e:
        message = "Failed to get manager, DO API responded: %s" % str(e.args)
        create_log(message, 3)
    return manager


def get_droplets():
    manager = get_manager()
    all_droplets = manager.get_all_droplets()
    return all_droplets


def get_droplet(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        return droplet
    except Exception as e:
        message = "Failed to get droplet, DO API responded: %s" % str(e.args)
        create_log(message, 3)


def import_droplets():
    droplets = get_droplets()
    for droplet in droplets:
        droplet_id = str(droplet).split(" ")[0]
        server = data.get_server_provider_id(int(droplet_id))
        print server.count()
        if server.count() == 0:
            droplet_obj = get_droplet(droplet_id)
            if droplet.status == "active":
                state = 1
            elif droplet.status == "off":
                state = 0
            else:
                state = 2
            server_id = data.make_server(droplet.name, droplet.disk, droplet.image['slug'], droplet.memory, droplet.vcpus, type="do", id=int(droplet.id), ip=droplet.ip_address, state=state)


def make_droplet(name, region, image, size, backups=0):
    token = get_token()
    if backups == 0:
        backups = False
    elif backups == 1:
        backups = True
    else:
        backups = False  # the original read 'backups == False', a no-op comparison
    try:
        droplet = digitalocean.Droplet(token=token, name=name, region=region, image=image, size=size, backups=backups)
        droplet.create()
        droplet = get_droplet(str(droplet).split(" ")[0])
        return droplet
    except Exception as e:
        message = "Failed to create droplet, DO API responded: %s" % str(e.args)
        create_log(message, 3)


def destroy_droplet(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        stat = droplet.destroy()
        if stat:
            return 1
        else:
            return 0
    except Exception as e:
        message = "Failed to destroy droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def shutdown_droplet(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.shutdown()
    except Exception as e:
        message = "Failed to shutdown droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def start_droplet(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.power_on()
    except Exception as e:
        message = "Failed to start droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def reboot_droplet(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.power_cycle()
    except Exception as e:
        message = "Failed to reboot droplet %s, DO API responded: %s" % (str(id), str(e.args))
        # the original built the message but never logged it; presumably intended
        create_log(message, 3)


def get_dist_images():
    manager = get_manager()
    dist_images = manager.get_data("https://api.digitalocean.com/v2/images?page=1&per_page=1&type=distribution")
    for image in dist_images['images']:
        if image['slug']:
            data.make_do_image(image['slug'], image['id'])


def get_sizes():
    manager = get_manager()
    sizes = manager.get_data("https://api.digitalocean.com/v2/sizes")
    for size in sizes['sizes']:
        data.make_do_size(size['slug'], size['memory'], size['vcpus'], size['disk'], size['transfer'], size['price_monthly'], size['price_hourly'])


def get_regions():
    manager = get_manager()
    regions = manager.get_data("https://api.digitalocean.com/v2/regions")
    for region in regions['regions']:
        data.make_do_region(region['slug'], region['name'])


def sync_status():
    manager = get_manager()
    droplets = data.get_server_type("do")
    for droplet in droplets:
        try:
            d = manager.get_droplet(droplet['id'])
        except:
            # the original fell through with 'pass', leaving d unbound on failure
            continue
        if droplet['state'] == 0 and d.status == "active":
            data.set_server_state(droplet['_id'], 1)
        elif droplet['state'] == 1 and d.status == "off":
            data.set_server_state(droplet['_id'], 0)
        elif droplet['state'] < 3 and d.status == "archive":
            data.set_server_state(droplet['_id'], 3)
        if droplet['ram'] != d.memory:
            data.set_server_memory(droplet['_id'], d.memory)
        if droplet['vcpu'] != d.vcpus:
            data.set_server_vcpus(droplet['_id'], d.vcpus)
        if droplet['disk_size'] != d.disk:
            data.set_server_disk_size(droplet['_id'], d.disk)
        if droplet['state'] == 2 and d.status != "new":
            if d.status == "active":
                data.set_server_state(droplet['_id'], 1)
            elif d.status == "off":
                data.set_server_state(droplet['_id'], 0)


def get_droplet_ipaddress():
    manager = get_manager()
    droplets = data.get_server_type("do")
    for droplet in droplets:
        try:
            if not droplet['ip']:
                d = manager.get_droplet(droplet['id'])
                data.set_ipaddress_server(droplet['_id'], d.ip_address)
                if droplet['state'] == "2":
                    if d.status == "active":
                        data.set_server_state(droplet['_id'], 1)
                    elif d.status == "off":
                        data.set_server_state(droplet['_id'], 0)
        except:
            pass


def resize_droplet(id, size):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.resize(size)
    except Exception as e:
        message = "Failed to resize droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def rename_droplet(id, name):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.rename(name)
    except Exception as e:
        message = "Failed to rename droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def reset_root_password(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.reset_root_password()  # the original omitted the call parentheses
    except Exception as e:
        message = "Failed to reset root password for droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def disable_backups(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.disable_backups()
    except Exception as e:
        message = "Failed to disable backups for droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def enable_private_networking(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.enable_private_networking()
    except Exception as e:
        message = "Failed to enable private networking for droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)


def enable_ipv6(id):
    manager = get_manager()
    try:
        droplet = manager.get_droplet(id)
        droplet.enable_ipv6()
    except Exception as e:
        message = "Failed to enable IPv6 for droplet %s, DO API responded: %s" % (str(id), str(e.args))
        create_log(message, 3)
UTF-8
Python
false
false
7,764
py
10
dofunctions.py
7
0.569294
0.563756
0
227
33.202643
189
tonytanCoder/py-account-service
2,310,692,420,693
1fe50b0e73524f13d69a0b598d01b09048df4f70
1a1f0cafd9c10fa95fe05aebaf82e60b6dcd9076
/com/tan/account/sqlalchemydemo/userqry.py
4b34ce82871d9d18a5b0b09789c50958d2f4802e
[]
no_license
https://github.com/tonytanCoder/py-account-service
c1f7da4a862eb1c21ab78cbfdea6857a6b4159f5
7be81552a2774a624687774904bf137570f60ece
refs/heads/master
2020-09-14T10:08:31.958101
2019-11-21T06:02:38
2019-11-21T06:02:38
223,099,653
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Imports:
from userservice import User
from userservice import DBSession


class UserQryService:
    def qryAllUser(self):
        # Create a session:
        session = DBSession()
        # Build a Query; filter() adds the WHERE conditions, one() returns the
        # single matching row, while all() returns every row:
        # user = session.query(User).filter(User.id == '5').one()
        users = session.query(User).all()
        # Print the type and the object's name attribute:
        # print('type:', type(user))
        # print('user_name:', user.user_name)
        # Close the session:
        session.close()
        return users
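# A small usage sketch, assuming userservice exposes the User model (with a
# user_name attribute, as the comments above suggest) and a configured
# DBSession factory.
if __name__ == '__main__':
    service = UserQryService()
    for user in service.qryAllUser():
        print(user.user_name)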
UTF-8
Python
false
false
633
py
5
userqry.py
4
0.578269
0.576427
0
19
27.263158
67
brechmos-stsci/crds
5,111,011,121,639
5e84e243de00facdcf8ea37a73e02103b9433262
1b3a063dc1c619eb06eed51a4c00afc06df6adb9
/crds/tests/__init__.py
22e775fd41e82646b71a44576b56a8bf62ab943c
[ "BSD-2-Clause" ]
permissive
https://github.com/brechmos-stsci/crds
7ca51852856a654647f646eb38b28b3d83d778ce
ffe7ee827c566b9e85de25863d31712e801f999a
refs/heads/master
2021-01-16T13:41:17.441224
2016-06-21T16:47:41
2016-06-21T16:47:41
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import division # confidence high from __future__ import print_function from __future__ import absolute_import from .test_config import *
UTF-8
Python
false
false
156
py
23
__init__.py
8
0.75
0.75
0
5
30
49
davidmunoz4185/dand_project4
8,366,596,305,371
919a1eb3923859e0377a61b0a5cc49ba3693cd98
f62b3c80feab32848a71d823dd9a66002d992fd2
/audit.py
769cffb43bc42a404c2ab4c215b24529d416b723
[]
no_license
https://github.com/davidmunoz4185/dand_project4
beb006ea502f8b0aca741b89c159254dc704a577
3756e65b5369d779382d192a76fb457c7c912a8a
refs/heads/master
2021-08-22T21:10:56.280071
2017-12-01T09:18:05
2017-12-01T09:18:05
112,719,516
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import re tipo_via_set = [ 'Avenida', 'Calle', 'Camino', 'Paseo', 'Plaza', 'Carrera', 'Ronda', 'Carretera', 'Pasaje' ] POSTCODE_PATTERN = re.compile(r'^(E)?(28[0-9]{3})') def split_by_char(string_in, char_in, pos_in, pos_out, set_in): first_field = string_in.split(char_in)[pos_in-1] if first_field in set_in and char_in in string_in: return string_in.split(char_in)[pos_out-1] return string_in def treat_tipo_via(string_in): string_out = string_in if string_out not in tipo_via_set: string_out = split_by_char(string_out, " ", 1, 1, tipo_via_set) string_out = string_out.upper()[:1] + string_out.lower()[1:] if string_out not in tipo_via_set: return "wrong value [{}]".format(string_in) return string_out def treat_postcode(string_in): string_out = "28000" m = POSTCODE_PATTERN.search(string_in) if m: return m.group(2) return string_out
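# A few illustrative calls; the inputs are invented examples of the Madrid
# OpenStreetMap street-type and postcode values this audit normalises.
if __name__ == '__main__':
    print(treat_tipo_via('Calle de Alcala'))   # -> 'Calle'
    print(treat_tipo_via('gran via'))          # -> 'wrong value [gran via]'
    print(treat_postcode('E28013'))            # -> '28013'
    print(treat_postcode('Madrid'))            # -> '28000'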
UTF-8
Python
false
false
1,071
py
8
audit.py
2
0.567694
0.550887
0
46
21.282609
71
ssh0/growing-string
15,247,133,931,476
136c60fa61131bd58652fc11540c7d54da39f803
702574ec18a35258ce1a028c8ecf3dd91197b514
/triangular_lattice/eden/eden_sticky.py
26f2c52cf86e39fdd1f80a8efa08073dd54acefd
[ "MIT" ]
permissive
https://github.com/ssh0/growing-string
4d5096225e4478913c654646d664f59d4bf0e88b
2e43916e91157dfb4253775149b35ec9d81ef14d
refs/heads/master
2020-04-12T03:10:44.130839
2017-03-24T07:51:44
2017-03-24T07:51:44
56,047,471
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-09-23
"""Eden model on triangular lattice"""

from eden import Eden
import numpy as np


def print_debug(arg):
    """Print argument if needed.

    You can use this function in any parts and its behavior is toggled here.
    """
    # print arg
    pass

if __name__ == '__main__':

    Lx, Ly = 100, 60
    eden = Eden(Lx, Ly, frames=5000, boundary={'h': 'reflective', 'v': 'reflective'})
    # eden = Eden(plot=False)
    eden.points = [(i, 0) for i in range(Lx)]
    # Index with a tuple of coordinate arrays; indexing with a plain list of
    # arrays is deprecated fancy indexing in recent numpy.
    eden.occupied[tuple(np.array(eden.points).T)] = True
    eden.neighbors = [(i, 1) for i in range(Lx)]
    eden.execute()
    print_debug(eden.occupied)
    print_debug(len(eden.neighbors))
    print_debug(len(np.where(eden.occupied)[0]))
    print_debug(len(eden.points))
    print_debug(eden.points)
UTF-8
Python
false
false
858
py
127
eden_sticky.py
84
0.628205
0.60373
0
35
23.485714
85
arunrock98/Amazon---Product-Analysis
6,786,048,370,949
804a32f656886d29343cb8c9bfbd1903b38b34b7
6e774191a7778d684629db257ed10b42e391d236
/create_search_urls.py
f9f21b1f07d243f4c0d13215441577c0fd9922e5
[]
no_license
https://github.com/arunrock98/Amazon---Product-Analysis
aed90218bf52678a3763b4f4dc39e1b999001537
f5c661a4e866f0056568ccc48f3ae78403af5c9f
refs/heads/main
2023-05-14T00:42:42.442371
2021-06-03T14:27:22
2021-06-03T14:27:22
354,801,103
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
f = open('search_urls.txt', 'w+')

url = 'https://www.amazon.com/s?k=processor&i=computers&ref=nb_sb_noss_1'
f.write(url)
f.write('\n')

# Pages 2..400; the page number is interpolated once (the old '&page=2' + str(i)
# concatenation produced page=22, page=23, ... instead of page=2, page=3, ...).
for i in range(2, 401):
    url = 'https://www.amazon.com/s?k=processor&i=computers&page=' + str(i) + '&qid=1617019761&ref=sr_pg_' + str(i)
    f.write(url)
    f.write('\n')

f.close()
UTF-8
Python
false
false
322
py
10
create_search_urls.py
1
0.618012
0.568323
0
12
25.916667
116
harshays/solutions
3,642,132,310,000
65de62a79a0a3f49ccd2a1d31a9955e1259e972a
2cd6098bf2181b789ca0877ce5909bbca8d38d95
/euler/py/036.py
7e2169d48358459721e0c5a36c98a8da3dce9340
[]
no_license
https://github.com/harshays/solutions
e48be3d20c1a0651a8a5d23684889261d3526071
cb5fafb617a652d8f18f04b250851a917c92ca35
refs/heads/master
2021-01-21T14:09:22.623741
2016-05-09T18:23:17
2016-05-09T18:23:17
29,054,743
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Project Euler problem 36: sum of all numbers below one million that are
# palindromic in both base 10 and base 2.

def tobinary(n):
    # bin() already returns a string, e.g. bin(5) == '0b101'; strip the prefix.
    return bin(n)[2:]

def palindrome(n):
    return str(n) == str(n)[::-1]

total = 0
for n in range(1, 1000000):
    if palindrome(n) and palindrome(tobinary(n)):
        total += n

print(total)
UTF-8
Python
false
false
245
py
136
036.py
131
0.669388
0.612245
0
8
29.75
60
alsomeb/Bankomat
18,098,992,221,466
b635ccb2ddb9764017615bee820d195b698cdb9e
28770b64ded422d17fd7c0fe812d4f1c8b45415a
/bankomat.py
465a9a93449d82651e5d2a26944914de51c930b3
[]
no_license
https://github.com/alsomeb/Bankomat
88ba531cbff9095bb02f247e9d89df189e50ea7d
30c04a86ae1db7ed8d7a386e3ec70e3c359ad40b
refs/heads/main
2023-08-16T20:50:35.468839
2021-10-03T19:09:17
2021-10-03T19:09:17
406,722,881
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
import datetime
import time
from functions import MainMenu
from functions import AccountMenu
from termcolor import colored, cprint  # colours are used for a nicer-looking "GUI"

accounts = {}  # accounts
log_list = {}  # transactions are logged in trans.json

with open("log.json") as json_file:  # opens the file with account numbers and their balances
    accounts = json.load(json_file)

with open("trans.json") as json_file:  # opens the file with transactions
    log_list = json.load(json_file)

# Start. Keeping the menu inputs as strings turned out simpler: a wrong entry
# just loops the menu again. Only amounts are converted with int().
while True:
    anykey_text = colored("Tryck enter för att fortsätta: ", color="green", attrs=["bold"])  # colour and attrs for the pause prompt
    anykey = input(anykey_text)  # makes the output look tidier, though it can feel repetitive

    MainMenu()
    val_text = colored("Ange menyval: ", color="green", attrs=["bold"])  # menu prompt styling
    val = input(val_text)

    if val == "3":  # quits the ATM
        break

    if val == "1":
        while True:
            # account numbers are kept as strings, which was simpler here
            kontonummer = input(colored("Ange ett nytt kontonr: ", 'green', attrs=['bold']))
            if kontonummer in accounts:  # error message if the account number already exists
                cprint("Kontonummret finns redan!", 'red', attrs=['bold'])
                tryAgain = input(colored("Vill du prova ett annat kontonummer, (j/n): ", 'green', attrs=['bold'])).lower()
                if tryAgain != "j":
                    break
            else:
                cprint(f"Du har skapat ett konto med kontonr: {kontonummer}", 'green', attrs=['bold'])
                accounts[kontonummer] = 0  # creates a new account number with a balance of 0 in the dictionary (accounts[key] = value)
                log_list[kontonummer] = []  # stores an empty list as the value (nesting), e.g. account "123" = [empty list]
                break

    if val != "3" and val != "2" and val != "1":  # error message for an invalid menu choice
        print(colored("Du måste välja en siffra mellan 1-3", 'red', attrs=['bold']))

    if val == "2":
        kontonummer = input(colored("Ange kontonummer: ", 'green', attrs=['bold']))
        while True:
            if kontonummer in accounts:
                anykey_text = colored("Tryck enter för att fortsätta: ", color="green", attrs=["bold"])
                anykey = input(anykey_text)

                AccountMenu()
                val_text2 = colored("Ange menyval: ", color="green", attrs=["bold"])  # menu prompt styling
                val = input(val_text2)

                if val == "5":  # quit
                    break

                if val == "1":  # withdraw money
                    uttag = input(colored("Ange belopp: ", 'green', attrs=['bold']))
                    if uttag.isnumeric():  # accepts digits only
                        uttag = int(uttag)  # converts the input to an int
                        saldo = accounts[kontonummer]
                        if uttag > saldo:
                            print(colored(f"Du har inte tillräckligt på kontot, ditt saldo är {saldo}", 'red', attrs=['bold']))
                        elif uttag == 0:
                            print(colored("Du måste ange högre än 0kr", "red", attrs=['bold']))
                        else:
                            accounts[kontonummer] = saldo - uttag
                            print(colored(f"Du har gjort ett uttag på: {uttag}", 'green', attrs=['bold']))
                            log_list[kontonummer].append(colored(f"{datetime.date.today()}, {time.strftime('%H:%M')}, UTTAG: {uttag}kr", 'red', attrs=['bold']))
                    else:
                        # written so that +, -, letters and other special characters are rejected
                        cprint("Endast siffror tillåtna, ej bokstäver och specialtecken!", 'red', attrs=['bold'])

                if val == "3":  # show balance
                    saldo = accounts[kontonummer]
                    print(colored(f"Ditt saldo är: {saldo}", 'green', attrs=['bold']))

                if val == "2":  # deposit money
                    insattning = input(colored("Ange belopp: ", 'green', attrs=['bold']))
                    if insattning.isnumeric():  # accepts digits only
                        insattning = int(insattning)
                        if insattning == 0:  # a zero deposit is rejected (the old string comparison could never trigger)
                            print(colored("Du måste ange högre än 0kr", "red", attrs=['bold']))
                        else:
                            saldo = accounts[kontonummer]
                            accounts[kontonummer] = saldo + insattning
                            print(colored(f"Du har gjort en insattning på: {insattning}kr på ditt konto", 'green', attrs=['bold']))
                            log_list[kontonummer].append(colored(f"{datetime.date.today()}, {time.strftime('%H:%M')}, INSÄTTNING: {insattning}kr", 'green', attrs=['bold']))
                    else:
                        # written so that +, -, letters and other special characters are rejected
                        cprint("Endast siffror tillåtna, ej bokstäver och specialtecken!", 'red', attrs=['bold'])

                if val == "4":  # loops through trans.json and prints the transaction history for the current account
                    for trans in log_list[kontonummer]:
                        print(f"{trans}\n")

                if val != "1" and val != "2" and val != "3" and val != "4" and val != "5":  # error message for an invalid menu choice
                    print(colored("Du måste välja en siffra mellan 1-5", 'red', attrs=['bold']))

            else:
                cprint("Du har angivit fel kontonr, går tillbaka till huvudmeny", 'red', attrs=['bold'])
                break

# the single-letter variable names below have no deeper meaning; just remember to spell them consistently
j = json.dumps(accounts)
with open("log.json", "w") as f:  # saves account numbers and their balances
    f.write(j)

t = json.dumps(log_list)
with open("trans.json", "w") as f:  # saves transactions
    f.write(t)
UTF-8
Python
false
false
6,103
py
2
bankomat.py
2
0.555335
0.550199
0
106
55.933962
171
kevinlondon/leetcode
13,460,427,523,070
b93404637f0df2fa515acedca525594eba5ea7d0
22717a981aded63f0f107d8fd4e25e58ee1db842
/0000-0999/021_merge_sorted_lists.py
84b4013ff39bff4f3b49af21d03927a331ddc31d
[]
no_license
https://github.com/kevinlondon/leetcode
f788daca8fa82f6eed1f58685cb232a3e804de0f
47a24ab1fbc09cbcb8eaf2882db179b186a708dc
refs/heads/master
2023-06-23T04:41:24.238953
2023-06-21T03:29:46
2023-06-21T03:29:46
124,432,928
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import annotations

from typing import Optional


# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
        # Walk both lists, always appending the smaller head; an exhausted
        # list contributes +inf so the other list is drained.
        merged_head, node = None, None
        i, j = list1, list2
        while i or j:
            a = i.val if i else float('inf')
            b = j.val if j else float('inf')
            if (i and not j) or a < b:
                next_node = i
                i = i.next
            else:
                next_node = j
                j = j.next
            if not node:
                merged_head = next_node
            else:
                node.next = next_node
            node = next_node
        return merged_head
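# Quick self-contained check; this ListNode mirrors the commented-out
# definition above (LeetCode normally injects it at runtime).
if __name__ == '__main__':
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    def build(values):
        head = None
        for v in reversed(values):
            head = ListNode(v, head)
        return head

    merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]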
UTF-8
Python
false
false
804
py
151
021_merge_sorted_lists.py
150
0.477612
0.471393
0
29
26.724138
104
gauravkhandelwal140/Inventory-management-system-django
11,768,210,411,891
5da3728d7c1fe3ca06834550d482b422803b3d52
6fb4116858b4906fb519ccaec3b3b4dcbe15b815
/product/views.py
b47ad13a15e0076b88c0234e13513012af18cf54
[]
no_license
https://github.com/gauravkhandelwal140/Inventory-management-system-django
cecb3b1c094bf75f24adcec0eccf5e23b300eb75
a4c4f16c5d9af86fb1c766022663a2620e64753e
refs/heads/master
2023-07-09T09:02:12.328422
2021-08-10T21:06:28
2021-08-10T21:06:28
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import redirect, HttpResponseRedirect, reverse
from django.contrib import messages
from django.db import transaction
from product.models import Product, StockIn, StockOut
from django.views.generic import ListView, DetailView, FormView
from django.core.cache import cache
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from product.forms import StockOutForm, StockDetailsForm, ProductForm
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404


class ProductCreateView(FormView):
    form_class = StockDetailsForm
    template_name = "add_product.html"

    def post(self, request, *args, **kwargs):
        try:
            product_name = request.POST.get('name', 'None')
            brand_name = request.POST.get('brand_name', 'None')
            unit_type = request.POST.get('unit_type', 'None')
            if unit_type == 'None':
                unit_type = 'Quantity'
            bar_code = request.POST.get('bar_code', 'None')
            quantity = request.POST.get('quantity', 'None')
            buying_price_item = request.POST.get('buying_price_item', 'None')
            price_per_item = request.POST.get('price_per_item', 'None')
            dated_order = request.POST.get('dated_order', 'None')
            total_amount = request.POST.get('total_amount', 'None')
            total_buying_amount = request.POST.get('total_buying_amount', 'None')
            stock_expiry = request.POST.get('stock_expiry', 'None')
            with transaction.atomic():
                prod_form_kwargs = {
                    'unit_type': unit_type,
                    'name': product_name,
                    'brand_name': brand_name,
                    'bar_code': bar_code
                }
                product_form = ProductForm(prod_form_kwargs)
                if product_form.is_valid():
                    print('product form successfully created')
                    product = product_form.save()
                    stockIn_kwargs = {
                        'product': product,
                        'quantity': quantity,
                        'price_per_item': price_per_item,
                        'total_amount': total_amount,
                        'buying_price_item': buying_price_item,
                        'total_buying_amount': total_buying_amount,
                        'dated_order': dated_order,
                        'stock_expiry': stock_expiry
                    }
                    stockinform = StockDetailsForm(stockIn_kwargs)
                    if stockinform.is_valid():
                        stockinform.save()
            messages.success(request, "Product Added!")
            return redirect(reverse_lazy('product'))
        except Exception:
            messages.warning(request, 'Product Not Added!')
            return redirect(reverse_lazy('product'))


class ProductUpdateView(UpdateView):
    template_name = 'update_product.html'
    model = Product
    form_class = ProductForm
    success_url = reverse_lazy('product')

    def form_valid(self, form):
        messages.success(self.request, 'Product Updated Successfully!')
        return super().form_valid(form)


class ProductItemList(ListView):
    template_name = 'product.html'
    context_object_name = 'products'
    model = Product
    paginate_by = 40
    ordering = '-id'

    def dispatch(self, request, *args, **kwargs):
        return super(ProductItemList, self).dispatch(request, *args, **kwargs)


class ProductDetailView(DetailView):
    model = Product
    template_name = 'product_detail.html'

    def dispatch(self, request, *args, **kwargs):
        if not self.request.user.is_authenticated:
            cache.clear()
            return HttpResponseRedirect(reverse_lazy('login'))
        return super(ProductDetailView, self).dispatch(request, *args, **kwargs)


class StockOutItems(FormView):
    form_class = StockOutForm
    template_name = 'add_stock_out.html'

    def dispatch(self, request, *args, **kwargs):
        return super(StockOutItems, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        form.save()
        messages.success(self.request, message='Stock Out Successfully!')
        return HttpResponseRedirect(
            reverse_lazy('stock_out_list', args=[self.kwargs.get('product_id')]))

    def form_invalid(self, form):
        return super(StockOutItems, self).form_invalid(form)

    def get_context_data(self, **kwargs):
        context = super(StockOutItems, self).get_context_data(**kwargs)
        try:
            product = Product.objects.get(id=self.kwargs.get('product_id'))
        except ObjectDoesNotExist:
            raise Http404('Product not found with concerned User')
        context.update({
            'product': product
        })
        return context


class AddStockItems(CreateView):
    template_name = 'add_stock_in.html'
    form_class = StockDetailsForm
    success_url = '/product/'

    def dispatch(self, request, *args, **kwargs):
        return super(AddStockItems, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Stock In Successfully!')
        return HttpResponseRedirect(reverse('stock_in_list', args=[self.kwargs.get('pk')]))

    def form_invalid(self, form):
        return super(AddStockItems, self).form_invalid(form)

    def get_context_data(self, **kwargs):
        context = super(AddStockItems, self).get_context_data(**kwargs)
        try:
            product = Product.objects.get(id=self.kwargs.get('pk'))
        except ObjectDoesNotExist:
            raise Http404('Product not found with concerned User')
        context.update({
            'product': product
        })
        return context


class StockInListView(ListView):
    template_name = 'stock_in_list.html'
    paginate_by = 30
    model = StockIn
    ordering = '-id'

    def get_queryset(self):
        queryset = self.queryset
        if not queryset:
            queryset = StockIn.objects.all()
        queryset = queryset.filter(product=self.kwargs.get('pk'))
        return queryset.order_by('-id')

    def get_context_data(self, **kwargs):
        context = super(StockInListView, self).get_context_data(**kwargs)
        context.update({
            'product': Product.objects.get(id=self.kwargs.get('pk'))
        })
        return context


class StockOutDeleteView(DeleteView):
    model = StockOut
    template_name = "stock_in_list.html"

    def get(self, request, *args, **kwargs):
        self.success_url = request.META.get('HTTP_REFERER', '/product/')
        messages.warning(self.request, 'StockOut Record Deleted!')
        return self.delete(request, *args, **kwargs)


class StockInDeleteView(DeleteView):
    model = StockIn
    template_name = "stock_in_list.html"

    def get(self, request, *args, **kwargs):
        self.success_url = request.META.get('HTTP_REFERER', '/product/')
        messages.warning(self.request, 'StockIn Record Deleted!')
        return self.delete(request, *args, **kwargs)


class StockOutListView(ListView):
    template_name = 'stock_out_list.html'
    paginate_by = 30
    model = StockOut
    ordering = '-id'

    def get_queryset(self, **kwargs):
        queryset = self.queryset
        if not queryset:
            queryset = StockOut.objects.all()
        queryset = queryset.filter(product=self.kwargs.get('pk'))
        return queryset.order_by('-id')

    def get_context_data(self, **kwargs):
        context = super(StockOutListView, self).get_context_data(**kwargs)
        context.update({
            'product': Product.objects.get(id=self.kwargs.get('pk'))
        })
        return context


class StockInUpdateView(UpdateView):
    template_name = 'stock_in_update.html'
    model = StockIn
    form_class = StockDetailsForm
    success_url = '/product/'

    def form_valid(self, form):
        messages.success(self.request, 'StockIn Updated Successfully!')
        return super().form_valid(form)


class ProductDeleteView(DeleteView):
    model = Product
    template_name = "product.html"
    success_url = '/product/'

    def get(self, request, *args, **kwargs):
        messages.warning(request, 'Product Deleted!')
        return self.delete(request, *args, **kwargs)
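# A hedged sketch of the URLconf these views imply; the names 'product',
# 'stock_in_list' and 'stock_out_list' come from the reverse()/reverse_lazy()
# calls above, but the exact paths and the remaining names are assumptions.
#
# from django.urls import path
# from product import views
#
# urlpatterns = [
#     path('product/', views.ProductItemList.as_view(), name='product'),
#     path('product/<int:pk>/', views.ProductDetailView.as_view(), name='product_detail'),
#     path('product/<int:pk>/stock-in/', views.StockInListView.as_view(), name='stock_in_list'),
#     path('product/<int:pk>/stock-out/', views.StockOutListView.as_view(), name='stock_out_list'),
#     path('product/<int:product_id>/stock-out/add/', views.StockOutItems.as_view(), name='stock_out_add'),
# ]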
UTF-8
Python
false
false
8,543
py
103
views.py
60
0.620859
0.619103
0
235
35.353191
91
skeptycal/pants
403,726,932,577
12612ef3a6c7cc1e556c31561856f9b6acb946cc
05ce8d3828ecdc3e4b92fd4533489e8e8ff2bf12
/contrib/node/src/python/pants/contrib/node/tasks/node_resolve.py
24a43879627a875c55b9015f58d599b41f58b00d
[ "Apache-2.0" ]
permissive
https://github.com/skeptycal/pants
3c40c6f7e7ac7102c116aed6f2df06b4eab7e5e4
3bb513ce74aefe63fb683bd9b01cc514b7068961
refs/heads/master
2022-01-19T14:29:52.806823
2020-10-21T19:05:32
2020-10-21T19:05:32
212,719,141
1
0
Apache-2.0
true
2022-03-27T05:18:01
2019-10-04T02:08:43
2020-10-21T19:05:36
2022-03-27T05:18:01
97,362
1
0
11
Python
false
false
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import os
from hashlib import sha1

from pants.base.build_environment import get_buildroot
from pants.base.fingerprint_strategy import DefaultFingerprintHashingMixin, FingerprintStrategy
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.build_graph import sort_targets

from pants.contrib.node.tasks.node_paths import NodePaths, NodePathsLocal
from pants.contrib.node.tasks.node_task import NodeTask


class NodeResolveFingerprintStrategy(DefaultFingerprintHashingMixin, FingerprintStrategy):
    """Fingerprint package lockfiles (e.g. package.json, yarn.lock...) so that we don't
    automatically rerun this task when none of them have changed.

    We read every file and add its contents to the hash.
    """

    _package_manager_lockfiles = {
        'yarn': ['package.json', 'yarn.lock'],
        'npm': ['package.json', 'package-lock.json', 'npm-shrinkwrap.json']
    }

    def _get_files_to_watch(self, target):
        package_manager = target.payload.get_field_value("package_manager", '')
        # NB: Defaults to empty list for things like scalajs ad-hoc packages.
        lockfiles = self._package_manager_lockfiles.get(package_manager, [])
        paths = [os.path.join(target.address.spec_path, name) for name in lockfiles]
        return paths

    def compute_fingerprint(self, target):
        if NodeResolve.can_resolve_target(target):
            hasher = sha1()
            for lockfile_path in self._get_files_to_watch(target):
                absolute_lockfile_path = os.path.join(get_buildroot(), lockfile_path)
                # NB: It should not be up to the caching to decide what happens when a
                # lockfile is not added in sources.
                if os.path.exists(absolute_lockfile_path):
                    with open(absolute_lockfile_path, 'r') as lockfile:
                        contents = lockfile.read().encode()
                        hasher.update(contents)
            return hasher.hexdigest()
        return None


class NodeResolve(NodeTask):
    """Resolves node_package targets to their node paths using registered resolvers.

    This task exposes two products, NodePaths and NodePathsLocal. Both products are
    optional, allowing the consumer to choose. NodePaths maps targets to their resolved
    paths inside the virtualized pants working directory. NodePathsLocal is similar,
    except that the resolved path is within the same directory in which the target is
    defined.

    A node path is considered resolved when the source files are present, all
    dependencies are installed, and any build scripts defined for the package have
    been executed.
    """

    _resolver_by_type = dict()

    @classmethod
    def product_types(cls):
        return [NodePaths, NodePathsLocal]

    @classmethod
    def prepare(cls, options, round_manager):
        """Allow each resolver to declare additional product requirements."""
        super().prepare(options, round_manager)
        for resolver in cls._resolver_by_type.values():
            resolver.prepare(options, round_manager)

    @property
    def cache_target_dirs(self):
        return True

    @classmethod
    def register_resolver_for_type(cls, node_package_type, resolver):
        """Register a NodeResolver instance for a particular subclass of NodePackage.

        Implementation uses a hash on node_package_type, so the resolver will only be
        used on the exact NodePackage subclass (not further subclasses of it).

        :param class node_package_type: A NodePackage subclass
        :param class resolver: A NodeResolverBase subclass
        """
        cls._resolver_by_type[node_package_type] = resolver

    @classmethod
    def _clear_resolvers(cls):
        """Remove all resolvers.

        This method is EXCLUSIVELY for use in tests.
        """
        cls._resolver_by_type.clear()

    @classmethod
    def _resolver_for_target(cls, target):
        """Get the resolver registered for a target's type, or None if there is none.

        :param NodePackage target: A subclass of NodePackage.
        :rtype: NodeResolver
        """
        return cls._resolver_by_type.get(type(target))

    @classmethod
    def can_resolve_target(cls, target):
        """Return whether this is a NodePackage with a resolver registered for its subtype.

        :param target: A Target
        :rtype: Boolean
        """
        return cls.is_node_package(target) and cls._resolver_for_target(target) is not None

    def _topological_sort(self, targets):
        """Topologically order a list of targets."""
        target_set = set(targets)
        return [t for t in reversed(sort_targets(targets)) if t in target_set]

    def execute(self):
        targets = self.context.targets(predicate=self.can_resolve_target)
        if not targets:
            return
        if self.context.products.is_required_data(NodePaths):
            node_paths = self.context.products.get_data(NodePaths, init_func=NodePaths)

            # We must have copied local sources into place and have node_modules directories
            # in place for internal dependencies before installing dependees, so
            # `topological_order=True` is critical.
            with self.invalidated(targets,
                                  topological_order=True,
                                  invalidate_dependents=True,
                                  fingerprint_strategy=NodeResolveFingerprintStrategy()
                                  ) as invalidation_check:
                with self.context.new_workunit(name='install', labels=[WorkUnitLabel.MULTITOOL]):
                    for vt in invalidation_check.all_vts:
                        target = vt.target
                        if not vt.valid:
                            resolver_for_target_type = self._resolver_for_target(target).global_instance()
                            resolver_for_target_type.resolve_target(self, target, vt.results_dir, node_paths)
                        node_paths.resolved(target, vt.results_dir)
        if self.context.products.is_required_data(NodePathsLocal):
            node_paths_local = self.context.products.get_data(NodePathsLocal, init_func=NodePathsLocal)
            # Always resolve targets if NodePathsLocal is required.
            # This is crucial for the `node-install` goal, which builds against source code
            # and relies on the latest sources rather than anything from the pants cache.
            # Caching is done locally via the node_modules directory within source and is
            # managed by the underlying package manager. In the future it may be possible
            # to reuse work from the private pants copy.
            sorted_targets = self._topological_sort(targets)
            with self.context.new_workunit(name='node-install', labels=[WorkUnitLabel.MULTITOOL]):
                for target in sorted_targets:
                    resolver_for_target_type = self._resolver_for_target(target).global_instance()
                    results_dir = os.path.join(get_buildroot(), target.address.spec_path)
                    resolver_for_target_type.resolve_target(self, target, results_dir, node_paths_local,
                                                            resolve_locally=True,
                                                            install_optional=True,
                                                            frozen_lockfile=False)
                    node_paths_local.resolved(target, results_dir)
UTF-8
Python
false
false
7,026
py
654
node_resolve.py
419
0.700114
0.698975
0
163
42.104294
99
jmquintana79/utilsDS
5,884,105,204,750
9b0ca16efbf248ed04c21eae5cd9f9f98cf067f4
0d9f12b352e8975dc9a5e937a3e005f27d034db3
/scripts/analysis/ADA/tools.py
a301f53a855b40c1f0eafc001e45013b558c7c87
[ "MIT" ]
permissive
https://github.com/jmquintana79/utilsDS
4228c8af65d29ea2920b49632b456defccff6239
aeef8e8d6f94cf9573164b5dcaa930004ed07ae7
refs/heads/master
2023-07-22T03:01:30.958181
2023-07-12T16:46:15
2023-07-12T16:46:15
143,678,150
0
1
MIT
false
2023-07-06T23:03:58
2018-08-06T04:54:09
2022-01-07T16:16:24
2023-07-06T23:03:57
36,869
0
1
7
Jupyter Notebook
false
false
import numpy as np import pandas as pd from classes import Columns ## thresholds for metrics according level of exigence def thresholds_according_level_exigence(level:str = 'normal')->dict: """ Thresholds for metrics according level of exigence. Parameters ---------- level : str, optional Level of exigence. It is only possible 'low', 'normal' and 'high'. The default is 'normal'. Returns ------- dict Thresholds according to the selected level of exigence. """ # validate assert level in ['low', 'normal', 'high'], "It is only possible these level of exigences: 'low', 'normal', 'high'" # initialize dexigence = dict() # set if level == 'low': dexigence['correlation'] = 0.25 dexigence['significance'] = 0.1 elif level == 'normal': dexigence['correlation'] = 0.5 dexigence['significance'] = 0.05 else: dexigence['correlation'] = 0.75 dexigence['significance'] = 0.01 # return return dexigence ## Check if there are enough data to do a statistics def check_is_enough_data(data:np.array, threshold:int)->bool: """ Check if there are enough data to do a statistics. Parameters ---------- data : np.array Data to be validated. threshold : int Limit to consider enough or not. Returns ------- bool Result of the validation. """ # remove nan values data = data[~np.isnan(data)] # validate and return if len(data) >= threshold: return True else: return False ## Get type of data in a dataframe def get_type_columns(df:pd.DataFrame)->dict: """ Get type of data in a dataframe. Parameters ---------- df : pd.DataFrame Dataframe to be used. Returns ------- dict Type of data per column. """ # get columns cols = Columns(df.dropna()) # initialize dtypecols = dict() # loop of columns for col in df.columns.tolist(): # validate and set type if col in list(cols.num): dtypecols[col] = 'num' elif col in list(cols.ord): dtypecols[col] = 'ord' elif col in list(cols.cat): dtypecols[col] = 'cat' else: dtypecols[col] = 'other' # return return dtypecols
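# Brief usage sketch; the sample array is invented, and importing this module
# assumes the sibling `classes` module (providing Columns) is available.
if __name__ == '__main__':
    limits = thresholds_according_level_exigence('high')
    print(limits)  # {'correlation': 0.75, 'significance': 0.01}

    sample = np.array([1.0, 2.0, np.nan, 4.0])
    print(check_is_enough_data(sample, threshold=3))  # True: 3 non-NaN values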
UTF-8
Python
false
false
2,443
py
201
tools.py
54
0.564061
0.557511
0
100
22.89
118
Akshay-s-raut/Python
15,985,868,319,367
2c6ad1bfe30cb279a12f9fb175e521151afb6d74
1c8abb9e956ba76d3b7c8b0fa4968ec6161e40fd
/Maths tools/get_partitions.py
142c27b7a317162938e1d5a46ece0261d4c24fd9
[]
no_license
https://github.com/Akshay-s-raut/Python
509d97e717720c9c49458d66412ead875eec82ce
b7ee9eb7342b956d1a2640f0c14c4b7bb2a41c1d
refs/heads/master
2021-08-28T07:21:37.532063
2021-07-31T16:23:06
2021-07-31T16:23:06
218,053,941
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
print("Ferrers Diagram for all Partitions of N") part=list() partsi=list() #try to memoize the recurrence for efficiency def getPartitions(n,max): sum = 0 if n==0: partj=partsi.copy() part.append(partj) del partsi[-1] return 1 elif n<0: del partsi[:] return 0 else: for i in range(1,n+1): if(i<=max): partsi.append(i) sum = sum + getPartitions(n-i,i) try: del partsi[-1] return sum except: return sum N = int(input("Enter the Number N: ")) print() getPartitions(N,N+1) def ferrers_diagram(a): s='' for i in a: s = s + '.'*i + '\n' return s.lstrip() for i in part: s = '' for j in i: s = s + ' {}'.format(j) print(s.lstrip()) print(ferrers_diagram(i)) print("Number of parts = ",len(part)) print() print(part)
UTF-8
Python
false
false
923
py
78
get_partitions.py
74
0.512459
0.501625
0
43
20.465116
48
betoma/advent-2020-python
3,925,600,109,523
8cbcbe7812237df46918fa38cfc51f10d1d6e837
10fe2707f9d266976be299db621b4ea5a5cbeedc
/11/advent-11.py
40a381a3e1d78420da0210181733d3b82e76d0e6
[]
no_license
https://github.com/betoma/advent-2020-python
6db2c6f20ba63fcb4b6caafc5987a43cd185285e
0e8b8a3287aed6f73aa583ac6e30138da24b8363
refs/heads/master
2023-08-15T15:29:29.170091
2021-10-14T12:10:12
2021-10-14T12:10:12
417,116,073
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import annotations import itertools class Place: def __init__(self, row: int, column: int): self.location = (row, column) def __repr__(self): return self.symbol def __str__(self): return f"Seat({self.symbol}: {self.location})" def __eq__(self, other): return ( type(self) is type(other) and self.occupied == other.occupied and self.location == other.location ) class Seat(Place): def __init__( self, row: int, column: int, room: WaitingRoom, occupied: bool = False, threshold: int = 4, ): super().__init__(row, column) self.waiting = room self.occupied = occupied self.threshold = threshold self.seat = True @property def symbol(self): if self.occupied: return "#" else: return "L" def adjacent_seats(self): x, y = self.location possible_x = [x] possible_y = [y] if x > 0: possible_x.append(x - 1) if y > 0: possible_y.append(y - 1) if x < self.waiting.rows - 1: possible_x.append(x + 1) if y < self.waiting.columns - 1: possible_y.append(y + 1) allowed_spots = [ s for s in itertools.product(possible_x, possible_y) if s != self.location ] return [self.waiting.room[i][j] for i, j in allowed_spots] def too_full(self): return len([s for s in self.adjacent_seats() if s.occupied]) >= self.threshold def all_empty(self): return all([not s.occupied for s in self.adjacent_seats()]) def change_status(self): if self.occupied and self.too_full(): return self.__class__( self.location[0], self.location[1], self.waiting, occupied=False ) elif (not self.occupied) and self.all_empty(): return self.__class__( self.location[0], self.location[1], self.waiting, occupied=True ) else: return self class Floor(Place): def __init__(self, row: int, column: int): super().__init__(row, column) self.occupied = False self.seat = False @property def symbol(self): return "." def change_status(self): return self class SmarterSeat(Seat): def __init__( self, row: int, column: int, room: WaitingRoom, occupied: bool = False, threshold: int = 5, ): super().__init__(row, column, room, occupied, threshold) def adjacent_seats(self): adj_places = super().adjacent_seats() seats_that_matter = [s for s in adj_places if s.seat] empty_places = [ (s, (s.location[0] - self.location[0], s.location[1] - self.location[1])) for s in adj_places if not s.seat ] while empty_places: look_further = [] for spot, loc in empty_places: x = spot.location[0] y = spot.location[1] new_x, new_y = (x + loc[0], y + loc[1]) if ( new_x >= 0 and new_x <= self.waiting.rows - 1 and new_y >= 0 and new_y <= self.waiting.columns - 1 ): look_further.append((self.waiting.room[new_x][new_y], loc)) seats_that_matter.extend([s[0] for s in look_further if s[0].seat]) empty_places = [s for s in look_further if not s[0].seat] return seats_that_matter class WaitingRoom: def __init__(self, filename: str): with open(filename) as f: contents = [line.strip() for line in f] self.room = [] for i, line in enumerate(contents): row = [] for j, cell in enumerate(line): if cell == "L": # part one # row.append(Seat(i, j, self)) # part two row.append(SmarterSeat(i, j, self)) elif cell == ".": row.append(Floor(i, j)) self.room.append(row) def __str__(self): return "\n".join(["".join([repr(s) for s in row]) for row in self.room]) @property def rows(self): return len(self.room) @property def columns(self): return len(self.room[0]) def n_occupied(self): return len([seat for row in self.room for seat in row if seat.occupied]) def churn(self): unstable = True while unstable: yield self new_room = [[s.change_status() for s in row] for row in self.room] if all([row == self.room[i] for i, row 
in enumerate(new_room)]): unstable = False self.room = new_room r = WaitingRoom("input.txt") for _ in r.churn(): pass print(r.n_occupied())
UTF-8
Python
false
false
5,033
py
23
advent-11.py
23
0.505265
0.499305
0
176
27.596591
86
yekang-wu/omega-miya
10,496,900,115,237
ee66e9e0ab77ef04e5ab89b3e4ac3b7da43855ab
691c70d88aa242ef97c2b5587de210e94854148a
/omega_miya/utils/bilibili_utils/data_classes.py
fb5fd76d0074c147c7567b6ff49fbfaf13190fd9
[ "Python-2.0", "MIT" ]
permissive
https://github.com/yekang-wu/omega-miya
bf632e28c788f06c74b61056142de23da9201282
53a6683fccb0618e306abe9e103cec78445f3796
refs/heads/master
2023-08-27T13:54:56.756272
2021-10-07T11:31:23
2021-10-07T11:31:23
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from dataclasses import dataclass from typing import Optional, Dict, List from omega_miya.database import Result class BiliInfo(object): @dataclass class UserInfo: user_id: int name: str sex: str face: str sign: str level: int @property def uid(self): return self.user_id @property def mid(self): return str(self.user_id) @dataclass class LiveRoomInfo: room_id: int short_id: int user_id: int status: int url: str title: str live_time: str cover_img: str @property def uid(self): return self.user_id @property def mid(self): return str(self.user_id) @dataclass class DynamicInfo: @dataclass class DynamicCard: content: str pictures: List[str] title: Optional[str] description: Optional[str] dynamic_id: int user_id: int user_name: str type: int desc: str url: str orig_dy_id: int orig_type: int data: DynamicCard @property def uid(self): return self.user_id @property def mid(self): return str(self.user_id) class BiliResult(object): @dataclass class UserInfoInfoResult(Result.AnyResult): result: Optional[BiliInfo.UserInfo] def __repr__(self): return f'<UserInfoInfoResult(error={self.error}, info={self.info}, result={self.result})>' @dataclass class LiveRoomInfoResult(Result.AnyResult): result: Optional[BiliInfo.LiveRoomInfo] def __repr__(self): return f'<LiveRoomInfoResult(error={self.error}, info={self.info}, result={self.result})>' @dataclass class LiveRoomDictInfoResult(Result.AnyResult): result: Optional[Dict[int, BiliInfo.LiveRoomInfo]] def __repr__(self): return f'<LiveRoomInfoDictResult(error={self.error}, info={self.info}, result={self.result})>' @dataclass class DynamicInfoResult(Result.AnyResult): result: Optional[BiliInfo.DynamicInfo] def __repr__(self): return f'<DynamicInfoResult(error={self.error}, info={self.info}, result={self.result})>'
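# Construction sketch with invented values; running it still requires the
# omega_miya package imported at the top of this module.
if __name__ == '__main__':
    user = BiliInfo.UserInfo(user_id=2, name='demo', sex='secret', face='', sign='', level=6)
    print(user.mid, user.name)  # '2' demo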
UTF-8
Python
false
false
2,375
py
152
data_classes.py
149
0.572211
0.572211
0
99
22.989899
106
aleph-im/py-libp2p
17,325,898,072,187
1237b458645261f7d7ae6976b1d6075051a91965
09cc5321dc37b6ffefeac6c3e41d942264caaf25
/libp2p/network/connection/raw_connection.py
08d2205528124dbd980090e4b2eb0abef64e36ae
[ "Apache-2.0", "MIT" ]
permissive
https://github.com/aleph-im/py-libp2p
460af6347ce8f293f8a2eff8c0c4dcd8e51c6f0d
3dfb5c49abf37fd95db12ae5766757332be70d08
refs/heads/master
2021-07-04T14:08:53.562330
2020-10-27T10:23:18
2020-10-27T10:23:18
212,055,838
0
2
NOASSERTION
true
2020-12-14T10:10:39
2019-10-01T09:16:19
2020-10-27T10:23:22
2020-12-14T10:08:35
1,673
0
0
1
Python
false
false
import asyncio from .exceptions import RawConnError from .raw_connection_interface import IRawConnection class RawConnection(IRawConnection): reader: asyncio.StreamReader writer: asyncio.StreamWriter is_initiator: bool _drain_lock: asyncio.Lock def __init__( self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, initiator: bool, ) -> None: self.reader = reader self.writer = writer self.is_initiator = initiator self._drain_lock = asyncio.Lock() async def write(self, data: bytes) -> None: """Raise `RawConnError` if the underlying connection breaks.""" try: self.writer.write(data) except ConnectionResetError as error: raise RawConnError(error) # Reference: https://github.com/ethereum/lahja/blob/93610b2eb46969ff1797e0748c7ac2595e130aef/lahja/asyncio/endpoint.py#L99-L102 # noqa: E501 # Use a lock to serialize drain() calls. Circumvents this bug: # https://bugs.python.org/issue29930 async with self._drain_lock: try: await self.writer.drain() except ConnectionResetError as error: raise RawConnError(error) async def read(self, n: int = -1) -> bytes: """ Read up to ``n`` bytes from the underlying stream. This call is delegated directly to the underlying ``self.reader``. Raise `RawConnError` if the underlying connection breaks """ try: return await self.reader.read(n) except ConnectionResetError as error: raise RawConnError(error) async def close(self) -> None: self.writer.close() await self.writer.wait_closed()
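# A hedged sketch of wiring RawConnection to a live socket; the host and port
# are placeholders and error handling is elided.
async def _demo(host: str = '127.0.0.1', port: int = 8000) -> None:
    reader, writer = await asyncio.open_connection(host, port)
    conn = RawConnection(reader, writer, initiator=True)
    await conn.write(b'ping')
    print(await conn.read(4))
    await conn.close()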
UTF-8
Python
false
false
1,780
py
55
raw_connection.py
52
0.629775
0.606742
0
55
31.363636
149
acoderly/mishell
7,490,422,970,566
c79b5aaaf211ed596bb331767406083f7fa05906
c1ae685bbdad867cf42ac4b947f7b8bd8ecf08e8
/src/mishell/shell_family/alpha.py
0548dbdc5f67f25134d7eda2ae0fdd27e6d88b35
[ "MIT" ]
permissive
https://github.com/acoderly/mishell
85b523f90d3be00a6d30b9493dc52be62cf2d5fc
035fc618cb1abfaeb9f0d519b6ff78c48494ff62
refs/heads/main
2023-04-22T05:31:40.470785
2021-04-29T03:11:36
2021-04-29T03:11:36
360,453,911
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import inspect import mishell import pkgutil from os.path import isabs class Alpha: def __init__(self): pass def __init_subclass__(cls, **kwargs): name = getattr(cls, "NAME", cls.__name__.lower()) file_and_name = inspect.getfile(cls) + "::" + name ShellFamily.loaded_family[file_and_name] = cls def do(self, data, *args, **kwargs): raise NotImplementedError() def get_process_result(self): raise NotImplementedError() family_loaded = set() def load(): global family_loaded paths = mishell.shell_family.__path__ paths = {p for p in paths if isabs(p) and p not in family_loaded} if len(paths) == 0: print(f"paths is empty.") return modules_to_load = [] for finder, name, _ in pkgutil.iter_modules(paths): found_module = finder.find_module(name) modules_to_load.append((name, found_module)) for (name, module) in sorted(modules_to_load, key=lambda x: x[0]): try: _ = module.load_module(name) except Exception as e: print(f"Could not load family at '{name}':{e}") family_loaded.update(paths) class ShellFamily: loaded_family = {} def __init__(self): self.registered_family = {} load() def initialize(self): for item in self.loaded_family.values(): self.register_family(item) def register_family(self, family_class): name = getattr(family_class, "NAME", family_class.__name__.lower()) family = family_class() self.registered_family[name] = family print(f"Successfully registered plugin '{name}'") def get_all_family(self): return [cls for cls in self.registered_family.values()] def get(self, family_name): return self.registered_family.get(family_name, None) def get_all_family_name(self): return [name for name in self.registered_family.keys()]
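# Usage sketch: merely defining a subclass of Alpha registers it (via
# __init_subclass__) in ShellFamily.loaded_family, and ShellFamily.initialize()
# then instantiates whatever was found. The Echo class below is an invented
# example, not an actual mishell plugin.
class Echo(Alpha):
    NAME = 'echo'

    def do(self, data, *args, **kwargs):
        self._result = data
        return data

    def get_process_result(self):
        return getattr(self, '_result', None)


if __name__ == '__main__':
    family = ShellFamily()
    family.initialize()
    print(family.get_all_family_name())  # includes 'echo'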
UTF-8
Python
false
false
1,952
py
15
alpha.py
11
0.61168
0.610656
0
74
25.378378
75
mcvayokay/symmv
17,686,675,325,347
5adb085be0d2c3807274755a762e0ca63cbc65fb
e1eb5ebfc117f3f6532d88a378439101b9f337d2
/symmv/logging.py
eeed0137b5728de4a09ab00452bb2bd3d7e2d848
[]
no_license
https://github.com/mcvayokay/symmv
6a7684ab799edbb8eb988df23b877d43c2586d39
cad3f6ff15726deb9efd87eb748f0116cd4e957f
refs/heads/master
2021-08-16T15:39:04.366333
2017-11-20T03:40:13
2017-11-20T03:40:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging import logging.config def configure_logging(level, filename): logging.config.dictConfig( { 'version' : 1, 'handlers' : { 'console' : { 'class' : 'logging.StreamHandler', 'formatter' : 'default', 'level' : level, 'stream' : 'ext://sys.stdout' }, 'file' : { 'class' : 'logging.handlers.RotatingFileHandler', 'formatter': 'default', 'level' : level, 'filename' : filename, 'maxBytes' : 4096, 'backupCount' : 3 } }, 'formatters' : { 'default' : { 'format' : '%(asctime)s | %(levelname)s | %(message)s' } }, 'loggers' : { __name__ : { 'handlers' : ['console', 'file'] } } } ) configure_logging(logging.DEBUG, '/home/kmcvay/tmp/symlink_manager.log') logger = logging.getLogger(__name__)
UTF-8
Python
false
false
1,177
py
18
logging.py
18
0.383178
0.37808
0
40
28.4
74