repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
nenusoulgithub/LearnPython | 2,207,613,200,032 | e09a4a72198c3fe77673a352133e71112c43ac61 | 3459b443b52544615a0b99a6cefa8ff0b2498a58 | /尚学堂/并发编程/P3_01-进程池.py | 8f911ed4d60ff6a5fb01fdf3f8ececd8680c4bf5 | []
| no_license | https://github.com/nenusoulgithub/LearnPython | daac90af22b3575b2b29444eac423a6134a796d8 | 734cb81440d03088e12239c3ef8ecb525286f30f | refs/heads/main | 2023-02-01T08:34:47.038816 | 2020-12-05T00:19:49 | 2020-12-05T00:19:49 | 304,339,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 进程池
import os
import time
from multiprocessing import Pool
def work(name):
    """Worker body: announce the start of work 10 times, pausing 0.5 s between prints."""
    for _ in range(10):
        print("进程{}:{}开始工作...".format(os.getpid(), name))
        time.sleep(0.5)
if __name__ == '__main__':
    # Fixed-size pool of 8 worker processes; submit 10 jobs asynchronously,
    # so two jobs queue until a worker frees up.
    pool = Pool(8)
    for i in range(10):
        pool.apply_async(work, args=("{}号员工".format(i + 1),))
    # close(): accept no new tasks; already-submitted tasks keep running.
    pool.close()
    # join() blocks until every worker has finished.
    pool.join()
nobe0716/problem_solving | 326,417,530,191 | 32820f735b2ae234710648c3e024f40465f5fa6a | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /codeforces/contests/1334/C. Circle of Monsters.py | 0b450a589400d671d2063cff1de6b0d29694f411 | []
| no_license | https://github.com/nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from collections import namedtuple
AB = namedtuple('AB', ['a', 'b'])
t = int(sys.stdin.readline().strip())
def solve(n, abs):
    """Return the minimal total cost for the circle of n monsters.

    For each monster i, ``abs[i].a`` is its health and ``abs[i - 1].b`` is the
    damage inherited from the previous monster's explosion (the circle wraps,
    so monster 0 inherits from monster n-1).  We pay the shortfall ``a - b``
    wherever the inherited damage is not enough, plus the cheapest price of
    "starting" the chain at one monster.
    """
    shortfall_total = 0
    cheapest_start = float('inf')
    for i in range(n):
        health, inherited = abs[i].a, abs[i - 1].b
        if health > inherited:
            shortfall_total += health - inherited
            cheapest_start = min(cheapest_start, inherited)
        else:
            cheapest_start = min(cheapest_start, health)
    return shortfall_total + cheapest_start
responses = []
# One test case per iteration: a line with the monster count n,
# then n lines each holding the "a b" pair for one monster.
for _ in range(t):
    n = int(sys.stdin.readline().strip())
    abs = []
    for _ in range(n):
        a, b = map(int, sys.stdin.readline().strip().split())
        abs.append(AB(a, b))
    r = solve(n, abs)
    responses.append(r)
# Buffer all answers and emit them in one write to keep I/O cheap.
print('\n'.join(map(str, responses)))
| UTF-8 | Python | false | false | 757 | py | 902 | C. Circle of Monsters.py | 897 | 0.541612 | 0.53897 | 0 | 32 | 22.65625 | 61 |
jingwanha/algorithm-problems | 1,760,936,593,175 | 10b23f58d8d86109d7e6cd03e14427887a9f6b3c | 60822122f562f01615710bc51d04bccee1ca0ba0 | /programers/전화번호목록_lv2.py | f3d61a56f538916021214f0d7607b194e217be92 | []
| no_license | https://github.com/jingwanha/algorithm-problems | 4e5bb6b24cd3c5e2dbd8379f954cf0879629f0ad | eb0bbb3219498c8c2f8b0330caf8c838b83a5d0d | refs/heads/main | 2023-07-29T19:30:37.833087 | 2021-09-14T05:46:50 | 2021-09-14T05:46:50 | 340,888,090 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://programmers.co.kr/learn/courses/30/lessons/42577
def solution(phone_book):
    """Return False if any number in phone_book is a prefix of another, else True.

    After lexicographic sorting, any prefix relation must occur between
    adjacent entries, so one linear pass over neighbours suffices.
    """
    sorted_phone_book = sorted(phone_book)
    for idx, number in enumerate(sorted_phone_book[:-1]):
        next_number = sorted_phone_book[idx + 1]
        # BUGFIX: the original returned inside the loop after the first pair,
        # so later prefix collisions were never examined.
        if next_number.startswith(number):
            return False
    return True
if __name__=='__main__':
    # Sample from the problem statement: "119" is a prefix of "1195524421",
    # so the expected output is False.
    phone_book = ["119", "97674223", "1195524421"]
    res = solution(phone_book)
    print(res)
| UTF-8 | Python | false | false | 543 | py | 45 | 전화번호목록_lv2.py | 43 | 0.605893 | 0.550645 | 0 | 22 | 23.545455 | 87 |
jendrikjoe/UdacityCapstone | 16,200,616,646,355 | 4dbe6fbcfa8d94d449c94a53732a501a2a1a8ca5 | 8211bf7722255b52535babe670b672a4fa2e8be5 | /ros/src/trajectory_plotter/trajectory_plotter.py | aa486cf755547afb2698d221600764d1fedc0d59 | []
| no_license | https://github.com/jendrikjoe/UdacityCapstone | 66e3b9799b486613b8bb4b5ae05ac7a3db672e31 | 865ca0a9c28dbd6495fe4c58de09316dc1f5778c | refs/heads/master | 2021-01-16T20:02:46.856038 | 2017-12-31T05:00:34 | 2017-12-31T05:00:34 | 100,192,651 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
from geometry_msgs.msg import PoseStamped, Quaternion
from styx_msgs.msg import Lane, Waypoint
import rospy
from matplotlib import pyplot as plt
import math
import tf
import numpy as np
class TrajectoryPlotter(object):
    """ROS node that plots the planned trajectory (/final_waypoints) together
    with the car's current pose (/current_pose) using matplotlib.

    NOTE(review): the waypoint callback uses ``self.ax``, which is only
    created by ``plot_trajectory`` when ``show_plot`` is True — confirm the
    node is always run with plotting enabled, otherwise ``waypoints_cb``
    raises AttributeError.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')
        self.baseWaypoints = None
        rospy.Subscriber('/current_pose', PoseStamped, self.position_cb)
        rospy.Subscriber('/final_waypoints', Lane, self.waypoints_cb)
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        self.position = [0, 0, 0]
        self.yaw = 0
        self.speed = 0
        self.targetLane = 1
        self.currentWPIndex = -1
        self.show_plot = False
        if self.show_plot:
            self.plot_trajectory()

    def plot_trajectory(self):
        """Create the matplotlib figure the callbacks draw into (blocking)."""
        fig = plt.figure()
        self.ax = fig.gca()
        self.ax.set_title('Trajectory')
        self.ax.set_xlabel('x')
        # BUGFIX: was set_xlabel('y'), which overwrote the x-axis label and
        # left the y axis unlabeled.
        self.ax.set_ylabel('y')
        plt.show(block=True)

    def position_cb(self, msg):
        """Cache the car's world position and yaw from a PoseStamped message."""
        self.position = [msg.pose.position.x,
                         msg.pose.position.y, msg.pose.position.z]
        orientation = (msg.pose.orientation.x, msg.pose.orientation.y,
                       msg.pose.orientation.z, msg.pose.orientation.w)
        euler = tf.transformations.euler_from_quaternion(orientation)
        self.yaw = euler[2]
        # rospy.logerr('yaw:%.3f' % self.yaw)
        # Wrap yaw into (-pi, pi].
        self.yaw = self.yaw if self.yaw < np.pi else self.yaw - 2 * np.pi

    @staticmethod
    def quaternion_from_yaw(yaw):
        """Return the quaternion for a pure rotation of `yaw` about the z axis."""
        return tf.transformations.quaternion_from_euler(0., 0., yaw)

    def waypoints_cb(self, lane):
        """Redraw the received trajectory and mark the current car position."""
        self.finalWaypoints = []
        localX, localY = self.getXY(lane.waypoints)
        self.ax.cla()
        self.ax.plot(localX, localY)
        self.ax.plot(self.position[0], self.position[1], marker='o', ms=5, color='r')
        plt.draw()

    def getXY(self, waypoints):
        """Return the world-frame x and y coordinate lists of `waypoints`."""
        xs = []
        ys = []
        for waypoint in waypoints:
            x = self.getX(waypoint)
            y = self.getY(waypoint)
            xs.append(x)
            ys.append(y)
        return xs, ys

    def getLocalXY(self, waypoints):
        """Return waypoint coordinates transformed into the car's local frame
        (translate by the car position, then rotate by -yaw)."""
        localXs = []
        localYs = []
        for waypoint in waypoints:
            x = self.getX(waypoint)
            y = self.getY(waypoint)
            x = x - self.position[0]
            y = y - self.position[1]
            localX = x * math.cos(self.yaw) + y * math.sin(self.yaw)
            localY = -x * math.sin(self.yaw) + y * math.cos(self.yaw)
            localXs.append(localX)
            localYs.append(localY)
        return localXs, localYs

    @staticmethod
    def getX(waypoint):
        """World-frame x coordinate of a waypoint."""
        return waypoint.pose.pose.position.x

    @staticmethod
    def getYaw(waypoint):
        """Yaw extracted from a waypoint's orientation quaternion."""
        orientation = (waypoint.pose.pose.orientation.x, waypoint.pose.pose.orientation.y,
                       waypoint.pose.pose.orientation.z, waypoint.pose.pose.orientation.w)
        euler = tf.transformations.euler_from_quaternion(orientation)
        return euler[2]

    @staticmethod
    def getY(waypoint):
        """World-frame y coordinate of a waypoint."""
        return waypoint.pose.pose.position.y

    @staticmethod
    def getVelocity(waypoint):
        """Linear (forward) velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    """def distance(self, waypoints, wp1, wp2):
        dist = 0
        dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
        for i in range(wp1, wp2 + 1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist"""
if __name__ == '__main__':
try:
TrajectoryPlotter()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| UTF-8 | Python | false | false | 3,899 | py | 20 | trajectory_plotter.py | 13 | 0.59246 | 0.585022 | 0 | 123 | 30.699187 | 91 |
hllanosp/tetris | 11,467,562,708,183 | 7d919a5a981fa30a4b6a70f8d2bc25f539d4dd09 | 77eb21f102c0c4a7bfcb0b1375bcc3b4d5c19593 | /pieza.py | d045b038b326b07ca306803388a084943097065f | []
| no_license | https://github.com/hllanosp/tetris | dfcd8a5c1852c9cb123054c25d91f4a6ebda2297 | d07f88f7b3ba0e5c062d67311ecc7e10f4592d7f | refs/heads/master | 2021-01-13T02:14:33.078013 | 2016-04-29T02:11:46 | 2016-04-29T02:11:46 | 35,199,677 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 hllanos <hllanos@pcllanos>
import pygame
"""
clase que representa los sprites del juego
contiene las propiedades y el comportamiento de los sprites
propiedades:
image -- define la imagen del sprite
rect -- define el rectangulo y la posicion del sprite
mascara -- propiedad para determinar colision en sprites
left -- alejamiento del sprite con respecto al marco izquierdo
top -- altura del sprite con respecto a la altura del marco de juego
rapido -- velocidad de moviento del sprite
"""
class figura (pygame.sprite.Sprite):
    """Movable game sprite driven by arrow-key input.

    Attributes (see the module docstring above):
        image   -- the sprite's surface
        rect    -- position rectangle derived from the image
        mascara -- collision mask built from the image
        rapido  -- when True, the DOWN key moves 5 px per update instead of 2
    """
    def __init__(self, dibujo, posx, posy):
        pygame.sprite.Sprite.__init__(self)
        self.image = dibujo
        self.rect = dibujo.get_rect()
        self.mascara = pygame.mask.from_surface(self.image)
        self.rect.left = posx
        self.rect.top = posy
        self.rapido = False
        #self.image.set_colorkey((255,255,255))
    def update(self, tecla):
        # UP rotates the piece 90 degrees clockwise; LEFT/RIGHT shift by
        # 25 px (presumably the board cell size — TODO confirm); DOWN drops
        # by 2 px normally, or 5 px when `rapido` is set.
        if(tecla == pygame.K_UP):
            self.image = pygame.transform.rotate(self.image, -90)
        if(tecla == pygame.K_LEFT):
            self.rect.move_ip(-25,0)
        if(tecla == pygame.K_RIGHT):
            self.rect.move_ip(25,0)
        if(tecla == pygame.K_DOWN):
            if self.rapido == False:
                self.rect.move_ip(0,2)
            else:
                self.rect.move_ip(0,5)
class ayuda(pygame.sprite.Sprite):
    """Auxiliary sprite ("ayuda" = helper): holds an image and its collision
    mask, and rotates the image 90 degrees clockwise when UP is pressed."""
    def __init__(self, dibujo):
        pygame.sprite.Sprite.__init__(self)
        self.image = dibujo
        self.mascara = pygame.mask.from_surface(self.image)
    def update(self, tecla):
        if (tecla == pygame.K_UP):
            self.image = pygame.transform.rotate(self.image, -90)
nestyme/Subtitles-generator | 5,755,256,188,657 | 34a54be940cf01b769e2cc02948ff27e5f635b18 | 2068fc1553de03a088c08929f3bd8fb04482933f | /vk_transcript_bot.py | 3867c20072f98b0a3ed58dc451d55155fb6914b0 | []
| no_license | https://github.com/nestyme/Subtitles-generator | 8876f560e548ea019f1f584022e64496464d9b7d | 12eb6a2aa32be1bcc565cb7b4919cfd3955a4382 | refs/heads/master | 2023-07-20T05:42:38.924184 | 2022-02-01T21:35:25 | 2022-02-01T21:35:25 | 196,001,289 | 82 | 31 | null | false | 2023-07-06T21:20:18 | 2019-07-09T12:09:36 | 2023-05-16T02:25:53 | 2023-07-06T21:20:17 | 19 | 73 | 27 | 6 | Python | false | false | import requests
import subprocess
import vk_api
from vk_api import VkUpload
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.utils import get_random_id
import os
vk_session = vk_api.VkApi(token='')
vk = vk_session.get_api()
longpoll = VkLongPoll(vk_session)
def main():
    """Poll VK longpoll events; treat each incoming text message as a video
    URL: download it, run speech recognition, and send the transcript back
    in chunks of at most 4000 characters (assumed VK message-size limit —
    TODO confirm).
    """
    vk = vk_session.get_api()
    for event in longpoll.listen():
        try:
            if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:
                print('id{}: "{}"'.format(event.user_id, event.text), end=' ')
                # SECURITY FIX: event.text is untrusted user input; pass it as
                # an argv element instead of interpolating into a shell line.
                subprocess.check_output(
                    ['python3', 'download_video.py', '-url', event.text])
                vk.messages.send(
                    user_id=event.user_id,
                    random_id=get_random_id(),
                    message='video uploaded on server'
                )
                subprocess.check_output('python3 recognize.py',
                                        shell = True)
                vk.messages.send(
                    user_id=event.user_id,
                    random_id=get_random_id(),
                    message='video recognized'
                )
                # recognize.py writes its transcript to result.txt.
                with open('result.txt', 'r', encoding='utf-8') as result_file:
                    text = result_file.read()
                if len(text) < 4000:
                    vk.messages.send(
                        user_id=event.user_id,
                        random_id=get_random_id(),
                        message=text)
                else:
                    # Send full 4000-char chunks, then the remainder.
                    text_main = text[:int(len(text)/4000)*4000]
                    text_res = text[int(len(text)/4000)*4000:int(len(text)/4000)*4000+int(len(text)%4000)]
                    for i in range(0,len(text_main),4000):
                        vk.messages.send(
                            user_id=event.user_id,
                            random_id=get_random_id(),
                            message=text[i:i+4000]
                        )
                    vk.messages.send(user_id=event.user_id,
                                     random_id=get_random_id(),
                                     message=text_res
                                     )
                print('ok')
                # Clean up the intermediate audio artifacts.
                os.system('rm samples/*')
                os.system('rm tmp.*')
                os.system('rm current.wav')
                if not text:
                    vk.messages.send(
                        user_id=event.user_id,
                        random_id=get_random_id(),
                        message='wrong format'
                    )
                    print('no results')
                    continue
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt and made the loop impossible to stop cleanly.
            vk.messages.send(
                user_id=event.user_id,
                random_id=get_random_id(),
                message='try again'
            )
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,913 | py | 6 | vk_transcript_bot.py | 4 | 0.43323 | 0.418126 | 0 | 73 | 38.90411 | 106 |
JetBrains/intellij-community | 5,437,428,623,650 | f71e2dc42cc1fa9646e593e7573c6be5e116a1ac | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/openpyxl/openpyxl/comments/shape_writer.pyi | db8b9133a869fd6d18b1bbf9ee8722509889dbde | [
"Apache-2.0",
"MIT"
]
| permissive | https://github.com/JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | false | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | 2023-09-12T03:37:30 | 2023-09-12T06:46:46 | 4,523,919 | 15,754 | 4,972 | 237 | null | false | false | from typing import Any
vmlns: str
officens: str
excelns: str
class ShapeWriter:
    # Type stub for openpyxl's comment-shape VML writer: builds the VML
    # drawing that anchors cell comments in a worksheet.
    vml: Any
    vml_path: Any
    comments: Any
    def __init__(self, comments) -> None: ...
    def add_comment_shapetype(self, root) -> None: ...
    def add_comment_shape(self, root, idx, coord, height, width) -> None: ...
    def write(self, root): ...
| UTF-8 | Python | false | false | 341 | pyi | 127,182 | shape_writer.pyi | 70,394 | 0.624633 | 0.624633 | 0 | 14 | 23.357143 | 77 |
ikim-quantum/optimization_sparse | 781,684,094,781 | 76b2c4fdb6f3b4d01a0e31c008399fa655a7f189 | 1b1a3d9eece9648f7b544966fe6c2d28334a6968 | /optimization.py | 1e66e790e48f70c8281cfafa1bbf9f2c40f76ab1 | [
"MIT"
]
| permissive | https://github.com/ikim-quantum/optimization_sparse | c1f4da15da84806bb558752943b872a8d73df3a4 | 4cdb31ec0d39c3836572b10d5010010cd4c50b8c | refs/heads/master | 2020-03-29T17:31:22.482854 | 2018-11-27T19:00:21 | 2018-11-27T19:00:21 | 150,167,733 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from qaoa import qaoa_maxcut_randsample, qaoa_maxcut
from sparse_reconstruction import reconstruct_from_signal
import matplotlib.pyplot as plt
def qaoa_maxcut_opt_single(
        edges, angles, which, sample_size, sample_size_reduced, shots=1000, draw=False):
    """
    Optimize a single angle for the MAXCUT QAOA by sparse reconstruction.

    Args:
        edges (list): Edges that define the graph of the MAXCUT problem.
        angles (list): QAOA angles.
        which (int): The index of the angle being optimized.
        sample_size (int): Number of spacings for the variable angle.
        sample_size_reduced (int): Reduced number of samples over the
            variable angle.
        shots (int): Total number of single-shot measurements.
        draw (bool): If True, plot the reconstructed signal and its maximum.
    Returns:
        float: The optimized value of angle `which`, in [0, 2*pi).
    """
    randangles, randsignals = qaoa_maxcut_randsample(
        edges, angles, shots=shots, which=which, sample_size=sample_size,
        sample_size_reduced=sample_size_reduced)
    # Estimate the error in 2-norm.
    error = pow(len(edges), 2) * len(randangles) / shots
    # Reconstruct the signal.
    # NOTE(review): `error` is computed above but the reconstruction is
    # called with error=0 — confirm whether the estimate was meant to be
    # passed here.
    sparse_signal = reconstruct_from_signal(
        randangles, randsignals, error=0)
    # Find the index that maximizes the signal.
    idx = np.argmax(sparse_signal)
    # Map the index back to an angle on the [0, 2*pi) grid.
    angle_opt = idx * 2 * np.pi / len(sparse_signal)
    if draw:
        plt.plot(sparse_signal)
        plt.plot(idx, sparse_signal[idx], marker='o', markersize=3, color="red")
        plt.show()
    return angle_opt
def sparse_maxcut_opt(edges, angles, sample_size,
                      sample_size_reduced, shots=1000, sweeps=5, draw=False):
    """
    Coordinate-wise optimization of all MAXCUT QAOA angles.

    Each sweep walks through the first half of the angle list; for index j it
    optimizes angle j and its partner j + len(angles)//2 back to back, then
    evaluates and prints the current cut value.

    Args:
        edges (list): Edges that define the graph of the MAXCUT problem.
        angles (list): QAOA angles (mutated in place).
        sample_size (int): Number of spacings for the variable angle.
        sample_size_reduced (int): Reduced number of samples over the
            variable angle.
        shots (int): Total number of single-shot measurements.
        sweeps (int): Total number of sweeps.
        draw (bool): Forwarded to the single-angle optimizer.
    Returns:
        list: The optimized angles.
    """
    half = len(angles) // 2
    for sweep in range(sweeps):
        for base in range(half):
            for which in (base, base + half):
                angles[which] = qaoa_maxcut_opt_single(
                    edges, angles, which, sample_size, sample_size_reduced,
                    shots=shots, draw=draw)
            current_value = qaoa_maxcut(edges, angles, 1000)
            print(
                "{}th sweep, {}th angle update, sample size={}: {}".format(
                    sweep, base,
                    shots * sample_size_reduced * (sweep * len(angles) + base),
                    current_value))
    print("Total number of samples: {}".format(shots * sample_size_reduced * sweeps * len(angles)))
    return angles
def sparse_maxcut_opt_simple(edges, angles, shots=1000, sweeps=5, draw=False):
    """
    Simplified coordinate-wise MAXCUT QAOA optimization with a fixed
    internal sampling grid (sample_size = 100, reduced to its square root).

    Args:
        edges (list): Edges that define the graph of the MAXCUT problem.
        angles (list): QAOA angles (mutated in place).
        shots (int): Total number of single-shot measurements (used only in
            the progress printout, matching the original implementation).
        sweeps (int): Total number of sweeps.
        draw (bool): Forwarded to the single-angle optimizer.
    Returns:
        list: The optimized angles.
    """
    sample_size = 100
    sample_size_reduced = int(np.sqrt(sample_size))
    half = len(angles) // 2
    for sweep in range(sweeps):
        for base in range(half):
            for which in (base, base + half):
                angles[which] = qaoa_maxcut_opt_single(
                    edges, angles, which, sample_size, sample_size_reduced,
                    shots=sample_size_reduced, draw=draw)
            current_value = qaoa_maxcut(edges, angles, 1000)
            print(
                "{}th sweep, {}th angle update, sample size={}: {}".format(
                    sweep, base, shots * (sweep * len(angles) + base),
                    current_value))
    print("Total number of samples: {}".format(shots * sweeps * len(angles)))
    return angles
def spsa_maxcut_opt(edges, angles, shots, alpha = 0.602, gamma=0.101, itt=1000):
    """
    Optimizing a MAXCUT problem with SPSA (simultaneous perturbation
    stochastic approximation).

    Args:
        edges (list): Edges that define the graph of the MAXCUT problem.
        angles (list): QAOA angles.
        shots (int): Total number of single-shot measurements.
        alpha (float): Decay exponent of the step-size sequence a_k.
        gamma (float): Decay exponent of the perturbation sequence c_k.
        itt (int): Total number of iterations.
    """
    for k in range(itt):
        c_k = 0.01 / pow((1+k), gamma)   # perturbation magnitude
        a_k = 0.01 / pow((1+k), alpha)   # step size
        # Rademacher (+/-1) simultaneous perturbation direction.
        randombit = np.random.randint(2, size=len(angles))
        randpm = 2* randombit - 1
        angles_p = angles + c_k * randpm
        angles_m = angles - c_k * randpm
        # BUGFIX: the second evaluation previously omitted shots=shots,
        # so the two sides of the finite difference used different budgets.
        g_k = (qaoa_maxcut(edges, angles_p, shots=shots)
               - qaoa_maxcut(edges, angles_m, shots=shots)) / (2*c_k)
        # BUGFIX: apply the SPSA gain a_k (it was computed but never used).
        angles = [angle + a_k * g_k * mybit for (angle, mybit) in zip(angles, randpm)]
        total_shots = k * shots * 2
        print("Total shots = {}: {}".format(total_shots, qaoa_maxcut(edges, angles)))
#def powell_maxcut_opt(edges, angles, shots):
| UTF-8 | Python | false | false | 5,382 | py | 15 | optimization.py | 6 | 0.603865 | 0.592531 | 0 | 134 | 39.126866 | 99 |
wilsonrocks/civulator | 14,731,737,870,758 | b83e23aa89bea7a6ad080e865f1900c89073283a | 3cacea0f49608b821261c1ebdcf9d6903dbbbe13 | /civtool.py | 5223d59d889b24c96a27f1b8f1699a2bbbc9fe05 | []
| no_license | https://github.com/wilsonrocks/civulator | cfdb057f72f12fee3f636e088591d736e9904e19 | 59ee8da4a21e9bc8c170c6f4f8e62014f17fc27d | refs/heads/master | 2020-05-27T15:05:29.347233 | 2017-05-30T22:08:42 | 2017-05-30T22:08:42 | 82,561,916 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
# Change working directory so relative paths (and template lookup) work again
#os.chdir(os.path.dirname(__file__))
import re
import pprint
import bottle
ignorecomment = re.compile(r';.*')
unitdef = re.compile(r'\[unit_(?P<unitname>\w+)')
assignment = re.compile(r'(?P<name>\w+)\s*=\s*(?P<data>.*)')
vetnames = re.compile(r'veteran_names = _\("(\w*)"\), _\("(\w*)"\), _\("(\w*)"\), _\("(\w*)"\)')
vetpower = re.compile(r'veteran_power_fact = (\d+), (\d+), (\d+), (\d+)')
terrainblock = re.compile(r'\[terrain_(?P<terrainname>\w+)\]')
units = {}
unit_data_to_get = ["class","attack","defense","hitpoints","firepower"]
terrain_data_to_get = ["name","class","defense_bonus","native_to"]
terrains = {}
veterankeys = []
veteranvalues = []
#Do all the stuff for UNITS
with open('units.ruleset') as unitsfile:
for line in unitsfile:
if vetnames.match(line):
for n in range(1,5):
veterankeys.append(vetnames.match(line).group(n))
if vetpower.match(line):
for n in range(1,5):
value = (int(vetpower.match(line).group(n))/100)
veteranvalues.append(value)
match = unitdef.match(line)
if match: # if we have a unit definition, do the following:
unitname = match.group("unitname").replace("_"," ")
unitname = unitname.replace("aegis","AEGIS")
units[unitname] = {}
unitdict = units[unitname]
thisline = ""
while not thisline == "\n":
capture = assignment.match(thisline)
if capture:
name = capture.group("name")
data = capture.group("data")
if name in unit_data_to_get:
try:
data = int(data)
except ValueError:
pass
unitdict[name]=data
try:
thisline = next(unitsfile)
except StopIteration:
print("Iteration ran out!")
veterans=dict(zip(veterankeys,veteranvalues))
# do all the stuff for TERRAIN
with open('terrain.ruleset') as terrainfile:
for line in terrainfile:
match = terrainblock.match(line)
if match:
terrainname = match.group("terrainname")
terrains[terrainname] = {}
terraindict = terrains[terrainname]
thisline = ""
while not thisline == "\n":
capture = assignment.match(thisline)
if capture:
name = capture.group("name")
data = capture.group("data")
if name in terrain_data_to_get:
terraindict[name]=data
try:
thisline = next(terrainfile)
except StopIteration:
print("Iteration ran out!")
# strip extra stuff
for terrainname in terrains:
terrain = terrains[terrainname]
terrain["class"] = terrain["class"][1:-1] #strip first and last character (remove "s)
terrain["defense_bonus"] = (int(terrain["defense_bonus"])+100)/100
terrain["name"] = terrain["name"][3:-2].replace('_',' ')
for key in terrains.keys():
terrains[key.replace('_',' ')]=terrains.pop(key)
import json
open("civJSON","w").write(json.dumps([data,terrains,veterans,units])) #TODO change so that it loads from this instead of ruleset files if ruleset files haven't been updated
#print(data)
#print(terrains)
#print(veterans)
print(units)
def dofight(data):
    """Run the combat calculation, storing every intermediate value back into
    `data` so the results template can reference each step.

    Reads the module-level `units`, `veterans` and `terrains` tables built
    from the ruleset files at import time.
    """
    # attacker stuff
    data["attackervalue"] = units[data["attacker"]]["attack"]
    data["attackerlevelmultiplier"] = veterans[data["attackerlevel"]]
    data["attackerlevelvalue"] = data["attackervalue"]*data["attackerlevelmultiplier"]
    # defender stuff
    data["defendervalue"] = units[data["defender"]]["defense"]
    data["defenderlevelmultiplier"] = veterans[data["defenderlevel"]]
    # BUGFIX: previously multiplied attackervalue by the defender's veteran
    # multiplier; the defender's own defense value is the correct base.
    data["defenderlevelvalue"] = data["defendervalue"]*data["defenderlevelmultiplier"]
    # terrain stuff
    for d in data: print(d,data[d])  # debug dump, kept to preserve behavior
    data["terrainmultiplier"] = terrains[data["terrain"]]["defense_bonus"]#TODO should only be if Land not Big Land
    data["terrainvalue"] = data["terrainmultiplier"] * data["defenderlevelvalue"]
    # fortified: +50% defense, except inside a city
    if data["fortified"] and data["location"] != "in_city":
        data["fortifiedmultiplier"] = 1.5
    else:
        data["fortifiedmultiplier"] = 1
    data["fortifiedvalue"] = data["fortifiedmultiplier"] * data["terrainvalue"]
def do_checkbox(data, fieldname):
    """Normalize a checkbox field in `data` to a real boolean.

    A missing or falsy value becomes False; any truthy value becomes True.
    """
    data[fieldname] = bool(data.get(fieldname, False))
@bottle.route('/')
def index():
    # Render the input form, offering the unit, veteran-level and terrain
    # choices parsed from the ruleset files at module load.
    return(bottle.template("civform",unitlist=sorted(units.keys()),veteranlevels=veterankeys,terrains=sorted(terrains.keys())))
@bottle.post('/combat')
def combat():
    """Handle the combat form POST: normalize checkboxes, run the fight
    calculation, and render the results template."""
    data = bottle.request.params
    checkboxes = ["greater_8", "walls", "coastal", "great_wall", "river","fortified"]
    for box in checkboxes:
        do_checkbox(data,box)
    # BUGFIX: look up the unit the user selected, not the literal key
    # "attacker" (units is keyed by unit names parsed from the ruleset).
    data["attackerclass"] = units[data["attacker"]]["class"]
    for d in data:
        print(d,data[d])  # debug dump of the submitted form
    dofight(data)
    return(bottle.template("civresults",data))
#print("starting server")
bottle.run(host='192.168.1.32',port=8080,debug=True,reloader=True)
#application = bottle.default_app()
| UTF-8 | Python | false | false | 5,705 | py | 7 | civtool.py | 4 | 0.577388 | 0.571429 | 0 | 170 | 31.558824 | 172 |
tobyzhu/genesis-backend | 7,043,746,387,600 | 7766856ef385785441f3793e031e6e974990a958 | 88ebd9065d97419c0f817d1ff1472927d31fab51 | /common/connectsqlany.py | bbecde9aa4fb89182a5fc5b1da29eb383cecc912 | []
| no_license | https://github.com/tobyzhu/genesis-backend | a74948fbce8929881c985644880acf99bc8643cf | 620594dc677966e405e05ef502069f8dc40a6eff | refs/heads/master | 2023-08-17T07:52:46.043433 | 2023-08-15T13:49:41 | 2023-08-15T13:49:41 | 196,469,788 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sqlanydb
def connectdb(server):
    """Open a connection to the hdms SQL Anywhere database.

    Returns the connection when `server` is 'sqlany', otherwise None
    (matching the original implementation's implicit fall-through).
    """
    if server != 'sqlany':
        return None
    return sqlanydb.connect(link="tcpip(host=localhost)",
                            ServerName='hdms',
                            uid='sa',
                            pwd='iHaVeFuN',
                            dbn='hdms')
def disconnectdb(read):
    """Close the given database connection and return 0 on success."""
    read.close()
    return 0
def ReadEmpl():
    """Fetch all employee (ecode, ename) rows from the hdms DB and print them."""
    read = connectdb('sqlany')
    try:
        Rcursor = read.cursor()
        # Employee basic info: ecode = employee code, ename = employee name.
        readsql = " select ecode, ename " \
                  " from empl "
        Rcursor.execute(readsql)
        readResult = Rcursor.fetchall()
        print(readResult)
    finally:
        # BUGFIX: the connection was previously never closed (resource leak).
        disconnectdb(read)
ReadEmpl() | UTF-8 | Python | false | false | 667 | py | 358 | connectsqlany.py | 355 | 0.491603 | 0.490076 | 0 | 35 | 17.714286 | 61 |
KongChan1988/51CTO-Treasure | 9,560,597,208,795 | 069762b0624b0b37048497027b9d91961a61c254 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第二模块学习/Day03/常用模块/shutil_test.py | 0c826645e4d53646737de8b565d6f3e2c3393bab | []
| no_license | https://github.com/KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | false | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | 2019-07-24T15:28:53 | 2019-10-30T22:05:11 | 130,318 | 2 | 8 | 1 | Python | false | false | #-*- Coding:utf-8 -*-
# Author: D.Gray
'''
shutil.copyfilobj("f1","f2")----将f1文本内容复制给 "f2"
shutil.copyfile("本节笔记","笔记2") # shutil.copyfileobj()强化功能 自动打开文件并复制内容给另一个文件
shutil.copymode(src,dst)-----仅拷贝权限,内容、组、用户均不变
shutil.copystart(src,dst)------拷贝状态的所有信息,包括 modebits,atime,mtime...
shutil.copy(src,dst)-----
shutil.copytrue("dir1","dir2")-----递归的去拷贝文件目录
shutil.rmtree("dirname)------删除文件目录
shutil.make_archive("压缩包的文件名","压缩形式(zip、tar)","目标文件压缩路径")
例:
将E:\Python_Pycharm_work\第二模块学习\Day01\ATM" 路径下的ATM文件压缩到当前目录下
shutil.make_archive("shutil_archive_test","zip","E:\Python_Pycharm_work\第二模块学习\Day01\ATM")
'''
import shutil
# f1 = open("本节笔记",encoding='utf-8')
# f2 = open("笔记2",'w',encoding='utf-8')
# shutil.copyfileobj(f1,f2) #将本节笔记文本内容复制给 "笔记2"
# shutil.copyfile("本节笔记","笔记2") # shutil.copyfileobj()强化功能 自动打开文件并复制内容给另一个文件
# shutil.copystat("本节笔记","笔记3")
#shutil.copytree("a","new_a") #递归的去拷贝文件目录
#shutil.rmtree("new_a")
#shutil.make_archive("shutil_archive_test","zip","E:\Python_Pycharm_work\第二模块学习\Day01\ATM")
# import zipfile
# z = zipfile.ZipFile("day5.zip",'w') # 压缩"本节笔记和笔记2"两个文件到当前脚本目录下 压缩包名'day5.zip'
# z.write("本节笔记")
# print('----')
# z.write("笔记2") | UTF-8 | Python | false | false | 1,621 | py | 470 | shutil_test.py | 340 | 0.694421 | 0.670386 | 0 | 33 | 34.333333 | 91 |
garypaduana/ProjectEuler | 7,894,149,900,540 | 3689687c0e395c8aea15123500165e3b8d662b77 | 5a9c4c344d32c90df1ca00da5bad617f2800761f | /src/com/gp/projecteuler/problems/Problem091.py | 541fe966fbd4970f917aac3b1777b9a90abd9594 | []
| no_license | https://github.com/garypaduana/ProjectEuler | d353a2e23e2f6467884643cfd75ae33dd51afb48 | 906f98130be8ae2dca91717772e47c10b452d9f4 | refs/heads/master | 2021-01-19T19:32:40.150653 | 2017-05-17T06:38:10 | 2017-05-17T06:38:10 | 10,246,389 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Project Euler Solutions
Copyright (C) 2012-2013, Gary Paduana, gary.paduana@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import itertools
import datetime
import math
def main():
start = datetime.datetime.now()
right_triangles = set()
upper = 51
for x1 in range(0, upper):
for y1 in range(0, upper):
for x2 in range(0, upper):
for y2 in range(0, upper):
if(is_right_triangle(x1,y1,x2,y2)):
right_triangles.add(frozenset({(x1,y1),(x2,y2)}))
print "Answer:", len(right_triangles)
print "Duration:", (datetime.datetime.now() - start)
def is_right_triangle(x1, y1, x2, y2):
    """Return True iff (0,0), (x1,y1) and (x2,y2) form a right triangle.

    Degenerate inputs (coincident points, or a point on the origin) return
    False.  The Pythagorean check uses a small absolute float tolerance.
    """
    if((x1 == x2 and y1 == y2) or
       (x1 == 0 and y1 == 0) or
       (x2 == 0 and y2 == 0)):
        return False

    distances = []
    distances.append((x1**2+y1**2)**(1./2.))
    distances.append((x2**2+y2**2)**(1./2.))
    distances.append(((x1-x2)**2+(y1-y2)**2)**(1./2.))

    hyp = max(distances)
    distances.remove(hyp)

    difference = hyp**2 - (distances[0]**2 + distances[1]**2)

    if(math.fabs(difference) <= 0.0000001):
        return True
    # BUGFIX: previously fell through and returned None; return an explicit
    # False so the function always yields a bool.
    return False
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,832 | py | 129 | Problem091.py | 121 | 0.611354 | 0.569869 | 0 | 56 | 31.714286 | 73 |
imscs21/myuniv | 9,148,280,349,998 | 8fd37d365262804e5301391eac0dae2a96f172db | efaa0c361e7abacc9efb8e0991c0f8f98f185f9c | /1학기/programming/basic/파이썬/파이썬 과제/5/5_6.py | aa374cfaa62fbcb10befe1f8dfa367bc975530d0 | [
"Apache-2.0"
]
| permissive | https://github.com/imscs21/myuniv | 71c0523dba940b98b09c6b7661971a9e60f2ba01 | 79e9002112ed0c1b4001fe13a8f6ee1e4761478c | refs/heads/master | 2020-04-12T02:30:39.679301 | 2016-12-25T11:58:47 | 2016-12-25T11:58:47 | 53,939,682 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def double(n):
return n * 2
def halve(n):
    """Return n halved, rounded down (floor division)."""
    return n // 2
def fastmult(m,n):
    """Return m * n by iterative double-and-halve (Russian peasant) multiplication.

    Improvements over the recursive original: iteration avoids hitting the
    recursion limit for large n, and negative n is now handled correctly
    (the original returned 0 for any n <= 0... including negative n).
    """
    if n < 0:
        # fold the sign of n into m so the loop only sees n >= 0
        m, n = -m, -n
    total = 0
    while n > 0:
        if n % 2 == 1:
            total += m
        m = m * 2
        n = n // 2
    return total
#그냥 재귀 | UTF-8 | Python | false | false | 271 | py | 190 | 5_6.py | 149 | 0.460076 | 0.43346 | 0 | 13 | 19.307692 | 47 |
abdulrahman-hassanin/kalman_filters | 3,693,671,907,988 | c3cc04b58ab0bc25197c4ad0f68aa9dd76514b7e | fd5e64dd846a5fdf61b2f3946e6103fda6d3726c | /EKF.py | 2719019e4a3f851c3674e86aaabdb9877cbca043 | []
| no_license | https://github.com/abdulrahman-hassanin/kalman_filters | d7a7cae9f37cb6601e726b751ae0915e2fa9c5ea | 256adba8e6e1b381801c9fef4495fbf20ee4661b | refs/heads/master | 2023-08-24T22:17:08.156690 | 2021-10-23T01:29:16 | 2021-10-23T01:29:16 | 418,191,985 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import numpy as np
class ExtendedKalmanFilter(object):
    """
    Extended Kalman filter (EKF) for state estimation.

    The transition model and measurement model are supplied as callables.
    When the matching Jacobian callable is provided the filter linearizes
    around the current state (non-linear EKF path); when it is None the
    supplied function is treated as returning the linear matrix itself.

    Attributes:
        x_dim: integer
            Dimension of the state vector.
        z_dim: integer
            Dimension of the measurement vector.
        x : np.array(x_dim, 1)
            Current state vector.
        P : np.array(x_dim, x_dim)
            Current state covariance.
        F_ : np.array(x_dim, x_dim)
            Transition matrix (or its Jacobian, in the non-linear path).
        F_fun : callable
            Linear case: returns the F_ matrix. Non-linear case: computes f(x).
        F_Jacobian_fun : callable or None
            Jacobian of F; None selects the linear code path.
        Q : np.array(x_dim, x_dim)
            Process noise covariance (recomputed every predict step).
        H_ : np.array(z_dim, x_dim)
            Measurement transition matrix (or its Jacobian).
        H_fun : callable
            Linear case: returns the H_ matrix. Non-linear case: computes h(x).
        H_Jacobian_fun : callable or None
            Jacobian of H; None selects the linear code path.
        R : np.array(z_dim, z_dim)
            Measurement noise covariance.
    """
    def __init__(self, x_dim, z_dim, F_fun, F_Jacobian_fun=None, H_fun = None, H_Jacobian_fun = None,
                 Q_update_fun = None, process_noise_cov=None, measument_noise_cov=None):
        self.x_dim = x_dim
        self.z_dim = z_dim
        # State starts as all-ones with identity covariance.
        self.x = np.ones((self.x_dim, 1))
        self.P = np.eye(self.x_dim)
        self.F_fun = F_fun
        self.F_Jacobian_fun = F_Jacobian_fun
        self.F_ = np.ones((x_dim, x_dim))
        self.H_fun = H_fun
        self.H_Jacobian_fun = H_Jacobian_fun
        self.H_ = np.ones((z_dim, x_dim))
        self.Q = np.ones((self.x_dim, self.x_dim))
        self.Q_update_fun = Q_update_fun
        self.process_noise_cov = process_noise_cov
        self.R = measument_noise_cov
        # Timing bookkeeping; dt is expected to be set by the caller
        # before predict() is invoked -- TODO confirm against callers.
        self.prev_time_stamp = 0
        self.dt = 0

    def predict(self):
        """
        Prediction step: propagate the state mean and covariance through
        the (possibly linearized) transition model, producing the
        predicted density.
        """
        # Process noise is recomputed from the current state and elapsed time.
        self.Q = self.Q_update_fun(self.x, self.process_noise_cov, self.dt)
        if self.F_Jacobian_fun is None:
            # Linear path: F_fun returns the transition matrix directly.
            self.F_ = self.F_fun(self.x, self.dt)
            self.x = np.dot(self.F_, self.x)
            self.P = np.dot((np.dot(self.F_, self.P)), self.F_.T) + self.Q
        else:
            # Non-linear path: propagate x through f(.), but propagate the
            # covariance through the Jacobian of f.
            self.F_ = self.F_Jacobian_fun(self.x, self.z_dim, self.x_dim, self.dt)
            self.x = self.F_fun(self.x, self.dt)
            self.P = np.dot((np.dot(self.F_, self.P)), self.F_.T) + self.Q

    def update(self, measurment):
        """
        Update step: fold a measurement into the predicted density.
        Computes the innovation V, innovation covariance S and Kalman
        gain K, then corrects the state mean and covariance.
        Returns the innovation V.
        """
        if self.H_Jacobian_fun is None:
            # Linear path: H_fun returns the measurement matrix directly.
            self.H_ = self.H_fun(self.x, self.dt)
            predicted_measurent = np.dot(self.H_, self.x)
        else:
            # Non-linear path: predict the measurement with h(.), but use
            # the Jacobian of h for the covariance algebra below.
            self.H_ = self.H_Jacobian_fun(self.x, self.z_dim, self.x_dim, self.dt)
            predicted_measurent = self.H_fun(self.x)
        # Innovation (measurement residual).
        V = measurment - predicted_measurent
        # Innovation covariance.
        S = np.dot(np.dot(self.H_, self.P), self.H_.T) + self.R
        # Kalman gain.
        K = np.dot(np.dot(self.P, self.H_.T), np.linalg.inv(S))
        self.x = self.x + np.dot(K, V)
        # Covariance correction in the P - K S K^T form.
        self.P = self.P - np.dot(np.dot(K, S), K.T)
        return V

    def calculate_rmse(self, estimations, ground_truth):
        '''
        Root Mean Squared Error over matched lists of state vectors.
        Raises ValueError when the lists are empty or differ in length.
        '''
        if len(estimations) != len(ground_truth) or len(estimations) == 0:
            raise ValueError('calculate_rmse () - Error - estimations and ground_truth must match in length.')
        rmse = np.zeros((self.x_dim, 1))
        for est, gt in zip(estimations, ground_truth):
            rmse += np.square(est - gt)
        rmse /= len(estimations)
        return np.sqrt(rmse)
yibo7/XsAirplane | 19,396,072,346,858 | 7d92f539071ea9c8b315c0fe2c3030cb62fb234e | b7a5d5e54705f60ab41ae38c899670e0359e45ce | /bullets/bullet_hero.py | 36b34f89ff9d59bccefb7fa6fb0d7b83e367ed5c | [
"MIT"
]
| permissive | https://github.com/yibo7/XsAirplane | 0ea8aceffdcd991522d13fa18d14f2131c304435 | 817212bbd33ac04547c38a1b4c780f0bb0bcb506 | refs/heads/main | 2023-08-25T23:59:40.895482 | 2021-11-07T05:40:48 | 2021-11-07T05:40:48 | 425,265,127 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from SpriteBase import SpriteBase
"""
英雄机的子弹
"""
class BulletHero(SpriteBase):
    """Projectile fired by the hero plane."""

    def __init__(self, sp, screen, img_index, xspeed=0, yspeed=15):
        """
        Create a hero-plane bullet.
        :param sp: the hero sprite this bullet was fired from
        :param screen: the scene surface the bullet is drawn onto
        :param img_index: initial costume (frame) index of the bullet
        :param xspeed: horizontal speed of the bullet per frame
        :param yspeed: vertical (upward) speed of the bullet per frame
        """
        self.x_speed = xspeed  # horizontal distance moved each frame
        paths = []
        for i in range(6):
            paths.append(f"images/herobullet/{i}.png")
        # The bullet should appear centered above the hero plane.
        pos = [sp.rect.left + 30, sp.rect.top - 50]
        super(BulletHero, self).__init__(screen, paths, pos, speed=yspeed, image_index=img_index)

    def action(self):
        """Advance the bullet one frame; destroy it once it leaves the screen."""
        if self.rect.top > 0:  # keep moving up until the top of the screen is reached
            self.rect.y -= self.speed
            if self.x_speed != 0:  # optional sideways drift: positive = right, negative = left
                self.rect.x += self.x_speed
            self.draw()
        else:
            self.kill()
| UTF-8 | Python | false | false | 1,292 | py | 14 | bullet_hero.py | 12 | 0.542802 | 0.531128 | 0 | 39 | 25.358974 | 97 |
eaverdeja/beginning_python | 6,425,271,109,117 | 699327d021f1c8f95a1321347c809d2f9359091d | 31cdf1d457eec35ca9c6723953a8412079de9544 | /exercises/exercise_5.py | c8e3d3c6684903f3f017705f70ce86676a6b4ee7 | []
| no_license | https://github.com/eaverdeja/beginning_python | 53176ebf27a00d77692d85db37f2c015a43c66bc | d0976c8fc76dc4a1fa1bb05a8969ed02e231d151 | refs/heads/master | 2020-03-26T23:35:58.890602 | 2018-10-30T11:50:27 | 2018-10-30T11:50:27 | 145,554,567 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import turtle
# Rebind the module name to a single Turtle instance shared by the helpers below.
turtle = turtle.Turtle()
turtle.reset()
def irParaDireita(distancia):
    """Move the shared turtle forward by *distancia* with the pen up (no line drawn)."""
    turtle.up()
    turtle.fd(distancia)
    turtle.down()
def moverTurtle(turtle, x, y):
    """Teleport *turtle* to (x, y) without drawing a line."""
    turtle.up()
    turtle.goto(x, y)
    turtle.down()
def desTrianRet(turtle, tamanhoCateto):
    """Draw a right isosceles triangle with both catheti of length *tamanhoCateto*."""
    turtle.forward(tamanhoCateto)
    turtle.left(90)
    turtle.forward(tamanhoCateto)
    turtle.left(135)
    # Pythagoras: hypotenuse of an isosceles right triangle.
    hipotenusa = math.sqrt(tamanhoCateto ** 2 + tamanhoCateto ** 2)
    turtle.forward(hipotenusa)
def desTrianRetPreenchido(turtle, tamanhoCateto, cor):
    """Draw a right isosceles triangle filled with color *cor*."""
    turtle.fillcolor(cor)
    turtle.begin_fill()
    desTrianRet(turtle, tamanhoCateto)
    turtle.end_fill()
def desLinha(turtle, tamanho, cor, espessura):
    """Draw a straight line of the given length, color and pen width."""
    turtle.width(espessura)
    turtle.color(cor)
    turtle.fd(tamanho)
def desCirculo(turtle, raio, x, y, cor, espessura):
    """Draw a circle outline of radius *raio* starting at (x, y)."""
    moverTurtle(turtle, x, y)
    turtle.width(espessura)
    turtle.color(cor)
    turtle.circle(raio)
def desCirculoPreenchido(turtle, raio, x, y, cor, espessura):
    """Draw a circle of radius *raio* at (x, y), filled with color *cor*."""
    turtle.fillcolor(cor)
    turtle.begin_fill()
    desCirculo(turtle, raio, x, y, cor, espessura)
    turtle.end_fill()
# 1.1 -- plain right triangle, cathetus 200
moverTurtle(turtle, -300, 0)
desTrianRet(turtle, 200)
# 1.2 -- right triangle filled in red
turtle.left(90)
irParaDireita(100)
desTrianRetPreenchido(turtle, 200, 'red')
# 1.3 -- thick green line
turtle.left(180)
irParaDireita(400)
desLinha(turtle, 100, 'green', 10)
# 1.4 -- blue circle outline (desCirculo jumps to (0, 0) itself)
turtle.left(180)
irParaDireita(400)
desCirculo(turtle, 100, 0, 0, 'blue', 10)
# 1.5 -- filled blue circle (original comment repeated the "1.4" label)
turtle.left(180)
irParaDireita(400)
desCirculoPreenchido(turtle, 100, 0, 0, 'blue', 10)
| UTF-8 | Python | false | false | 1,554 | py | 13 | exercise_5.py | 13 | 0.69414 | 0.64971 | 0 | 70 | 21.185714 | 67 |
shivam-raj/Focal-Loss-demo | 5,265,629,917,272 | 4b0733a9063704f477b38c73329733725a2bd7d3 | 08967f8e974708159684e9b8d500b7d2748cd5f8 | /data.py | 563d63631c3f0b63536bd459755a1d8f1469096c | []
| no_license | https://github.com/shivam-raj/Focal-Loss-demo | d5d2b85125b47f94491cd845ac7e8e17ec8908e1 | 7e4a26b91b9ed6a403e1b73c3033f254ca8f23c5 | refs/heads/master | 2022-03-03T06:11:08.655602 | 2019-10-31T12:57:00 | 2019-10-31T12:57:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_datasets as tfds
# Silence the tensorflow_datasets download progress bar; keep a short alias for keras.
tfds.disable_progress_bar()
keras = tf.keras
def get_data(IMG_SIZE, name, classes):
    """Load dataset *name* from tensorflow_datasets with an 8/1/1 weighted
    split of TRAIN, preprocessing every example for training.

    Images are cast to float32, scaled to [-1, 1] and resized to
    (IMG_SIZE, IMG_SIZE); labels are one-hot encoded over *classes*.
    Returns (train, val, test, get_label_name) where get_label_name maps
    an integer label back to its string name.
    """
    weighted_splits = tfds.Split.TRAIN.subsplit(weighted=(8, 1, 1))
    loaded, metadata = tfds.load(
        name, split=list(weighted_splits), with_info=True, as_supervised=True)
    raw_train, raw_val, raw_test = loaded

    def preprocess(image, label):
        # Scale pixels to [-1, 1], resize, one-hot the label.
        scaled = (tf.cast(image, tf.float32) / 127.5) - 1
        resized = tf.image.resize(scaled, (IMG_SIZE, IMG_SIZE))
        return resized, tf.one_hot(label, classes)

    train = raw_train.map(preprocess)
    val = raw_val.map(preprocess)
    test = raw_test.map(preprocess)
    get_label_name = metadata.features['label'].int2str
    return train, val, test, get_label_name
return train,val,test,get_label_name | UTF-8 | Python | false | false | 848 | py | 5 | data.py | 4 | 0.709906 | 0.696934 | 0 | 22 | 37.590909 | 110 |
NathanaelNeria/Workshop4 | 14,963,666,101,109 | 588a5a876e6686518745577eee918ff59e2bdc9e | 783f4a5ab680d3573b37e6b9be3bb72943ea8228 | /ascii.py | 4873c52752acb5ae139fdf94e2908718d4db23aa | []
| no_license | https://github.com/NathanaelNeria/Workshop4 | fa58a0c1aceacacf07c6a6536b4ea354e67722fd | b7a417fcf012cb629718798628ed5f30f689f60e | refs/heads/master | 2021-01-01T03:45:17.111936 | 2016-06-10T14:24:48 | 2016-06-10T14:24:48 | 57,095,454 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def get_number(lower=10, upper=50):
user_input = input("Enter Enter a number {}-{}".format(lower, upper))
user_input2 = input("Enter Enter a number {}-{}".format(lower, upper))
while True:
if user_input.isdecimal() == True and user_input2.isdecimal() == True:
if int(user_input) >= lower and int(user_input) <= upper and int(user_input2) >= lower and int(user_input2) <= upper:
for i in range(int(user_input),int(user_input2)):
print("{} {}".format(i, chr(i)).strip())
break
else:
print("Enter a valid number!")
user_input = input("Enter Enter a number {}-{}".format(lower, upper))
else:
print("Enter a valid number!")
user_input = input("Enter Enter a number {}-{}".format(lower, upper))
get_number()
| UTF-8 | Python | false | false | 876 | py | 2 | ascii.py | 2 | 0.545662 | 0.535388 | 0 | 18 | 47.444444 | 129 |
idin/silverware | 2,319,282,349,476 | b7b4393f24b9d3309e4e80f1c9e05f838571da0d | 9752669b19ad3893f080230bec33fb1ef3ae0a66 | /silverware/htmltext/is_bullet_point.py | 613deafedc54bd1609f73da2b066d7fdbe1acaea | [
"MIT"
]
| permissive | https://github.com/idin/silverware | 085d535723eb9cd6684b95fbac63a04fec5d428a | 2c47931937f4b1d34e97a1dfa3e58255e57e3545 | refs/heads/master | 2021-07-04T11:54:36.766094 | 2020-10-26T00:26:33 | 2020-10-26T00:26:33 | 189,703,237 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def has_bullet(string):
if not isinstance(string, str):
return False
string = string.strip()
if len(string) < 2:
return False
# if it is of type:
# - bullet point
# a bullet point
# 1 bullet point
# split by space
split_by_space = string.split()
if len(split_by_space) > 1 and len(split_by_space[0]) == 1 and split_by_space[1][0].isalpha():
return True
# if it is of type:
# -bullet point
# *bullet point
if not string[0].isalpha() and not string[0].isnumeric() and string[1].isalpha():
return True
return False
def is_bullet_heading(string):
    """Return True when *string* looks like a heading that introduces a
    bullet list: a str of at least two characters (after stripping) that
    ends in ':' or '?'.
    """
    if not isinstance(string, str):
        return False
    trimmed = string.strip()
    if len(trimmed) < 2:
        return False
    return trimmed.endswith((':', '?'))
def is_bullet_point(text, previous_text):
    """Return True when *text* reads as a bullet item in context.

    A bullet item is a line that has a bullet marker itself AND follows
    either a heading line ("Topics:") or another bulleted line.
    """
    if not (isinstance(text, str) and isinstance(previous_text, str)):
        return False
    current = text.strip()
    previous = previous_text.strip()
    if len(current) < 2 or len(previous) < 2:
        return False
    if not has_bullet(current):
        return False
    return is_bullet_heading(previous) or has_bullet(previous)
| UTF-8 | Python | false | false | 1,155 | py | 33 | is_bullet_point.py | 31 | 0.677056 | 0.664935 | 0 | 59 | 18.576271 | 95 |
monisjaved/rucio | 3,049,426,815,139 | 7653a97e19cd18e12e7aee871f3df55102c49350 | 5271bf1f72c49ceb50a1bf3615b424d866cb7538 | /lib/rucio/vcsversion.py | 00f14851e09d83dbfe45b5f613be251a0c503dc5 | [
"Apache-2.0"
]
| permissive | https://github.com/monisjaved/rucio | d0d6b50e14b5045fbdef84d84875b294be79b325 | b3e76f79dda668d55d648e22051d0efcd21ce07f | refs/heads/master | 2021-01-22T17:42:52.643231 | 2017-03-14T13:55:44 | 2017-03-14T13:55:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
This file is automatically generated by setup.py, So don't edit it. :)
'''
# Build metadata injected by setup.py at packaging time (do not edit by hand).
VERSION_INFO = {
    'final': True,
    'version': '1.2.5-1',
    'branch_nick': '(detached',
    'revision_id': '0b06384e2873c3b5930ff40858fb88e3d64ec2ac',
    'revno': 3266
}
| UTF-8 | Python | false | false | 256 | py | 3 | vcsversion.py | 3 | 0.628906 | 0.496094 | 0 | 10 | 24.6 | 70 |
FuShengRuoXian/django-spms | 12,472,585,041,330 | 3aa4fc2999e98e182c3b76e72443ebc2327d08cd | cf5e767faabe7307a8e081b35fd9600d003f8665 | /src/tests/create_token.py | 1b6c8f3baaff668674b2cddb682d3e472173b334 | [
"BSD-3-Clause"
]
| permissive | https://github.com/FuShengRuoXian/django-spms | bfd0ec8cc7ef957c8e6e308e1e9bf4259cf7c551 | 95ac193891f93da07c3a26feeaf846e6030f3466 | refs/heads/master | 2023-05-23T20:09:17.964349 | 2020-03-12T09:58:27 | 2020-03-12T09:58:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
from rest_framework_jwt.settings import api_settings
# Resolve the configured JWT payload/encode handlers from DRF-JWT settings.
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER

user = 'admin'
# NOTE(review): the default JWT_PAYLOAD_HANDLER expects a User model
# instance; passing a bare username string here may not produce a valid
# payload -- confirm against the configured handler.
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
print(token)
| UTF-8 | Python | false | false | 296 | py | 109 | create_token.py | 75 | 0.736486 | 0.733108 | 0 | 13 | 20.615385 | 54 |
lukmy/toolbox | 16,097,537,430,109 | 3bb5e9322c3dd927b56a247fc9ba2c195b438c52 | 3b0f4b5686a79a7a369ff20f27957cfa5d8dc09c | /bin/git.py | 5c3a5bd49272ac1b405a52a6dcfdcaa65cf64a50 | []
| no_license | https://github.com/lukmy/toolbox | 5568c7e1d0a3516d9b9f040ac4baf7251eed5f41 | 820ef4ae913b41ece6e15f3230b89ef095348952 | refs/heads/master | 2021-07-05T08:32:11.031466 | 2017-09-28T04:40:44 | 2017-09-28T04:40:44 | 103,383,165 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from toolbox_utils import get_config, run_cmd, FireBase
class Config(object):
    """Git-related settings read once (at class-body time) from the toolbox config."""
    config = get_config().get('git', {})
    feature_prefix = config.get('feature_branch_prefix', 'feature/')
    feature_base = config.get('feature_branch_base', 'develop')
    publish_default_remote = config.get('publish_default_remote', 'origin')


# Single shared settings instance used by the Git commands below.
config = Config()
class Git(FireBase):
    """python-fire CLI exposing git-flow style feature-branch helpers."""

    _config = config

    def feature_start(self, name, base=None):
        # Create the feature branch off the given (or configured) base branch.
        branch = '{0}{1}'.format(config.feature_prefix, name)
        run_cmd('git checkout -b {0} {1}'.format(branch, base or config.feature_base))

    def feature_publish(self, remote=None, force=False):
        # Push HEAD and set upstream; -f only when explicitly forced.
        target = remote or config.publish_default_remote
        flag = '-f' if force else ''
        run_cmd('git push {0} -u {1} HEAD'.format(target, flag))

    def feature_list(self):
        # List local branches that carry the feature prefix.
        run_cmd('git branch | grep "{0}"'.format(config.feature_prefix))
if __name__ == '__main__':
    # Expose the Git class as a python-fire command-line interface.
    from fire import Fire
    Fire(Git)
| UTF-8 | Python | false | false | 1,029 | py | 6 | git.py | 6 | 0.641399 | 0.635569 | 0 | 39 | 25.384615 | 76 |
daid/Cura2 | 8,272,107,012,710 | 6d9cd684ae143b544bb89fc9b08c2b7deb8a4dda | d7b178ce6d6afd53e71e21221555c4b35fa4463e | /Cura/gui/openGLUtils.py | d07ef1870250bc0cf597515ac39bf389a4cc08e6 | []
| no_license | https://github.com/daid/Cura2 | 0badce5a5c6bdcb2fe46f6d4d8b963881492c30a | 312baa01920a929b5ca5dd0d753745b921225e52 | refs/heads/master | 2016-09-06T08:35:26.771679 | 2014-11-21T14:50:50 | 2014-11-21T14:50:50 | 18,211,474 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import wx
import numpy
import os
import atexit
from OpenGL.GL import *
from OpenGL.GL import shaders
from ctypes import c_void_p
from Cura.preferences import getPreference
from Cura.resources import getResourcePath
from Cura.resources import getBitmap
# Global render-mode flags shared by every GL wrapper class below.
legacyMode = getPreference('legacy_rendering', 'False') == 'True'
shuttingDown = False
contextSource = None

#Register an at-exit function so we do not try to cleanup the OpenGL context on exit. (Window that needs to handle the cleanup could already be destroyed)
def _death():
    global shuttingDown
    shuttingDown = True
atexit.register(_death)
class GLShader(object):
    """Lazy-compiled GLSL shader program (vertex + fragment).

    Compilation is deferred until the first bind() so a GL context is
    guaranteed to exist.  In legacy rendering mode the shader is never
    compiled and bind()/unbind() are effectively no-ops.
    (Python 2 syntax: except-comma and print statements below.)
    """
    def __init__(self, vertexProgram=None, fragmentProgram=None, filename=None):
        super(GLShader, self).__init__()
        if filename is not None:
            # NOTE(review): _loadFromFile never stores self._filename, so a
            # file-loaded shader has no _filename attribute -- confirm unused.
            self._loadFromFile(filename)
        else:
            self._filename = None
            self._vertexString = vertexProgram
            self._fragmentString = fragmentProgram
        self._program = None
        self._contextSource = None

    def _loadFromFile(self, filename):
        """Read a combined shader file; '--SECTION' lines switch between
        BOTH / VERTEX / FRAGMENT targets for the following lines."""
        vertexProgram = ''
        fragmentProgram = ''
        type = 'BOTH'
        for line in open(os.path.join(getResourcePath('shaders'), filename), "r"):
            if line.startswith('--'):
                type = line[2:].strip()
                continue
            if type == 'BOTH':
                vertexProgram += line
                fragmentProgram += line
            elif type == 'VERTEX':
                vertexProgram += line
            elif type == 'FRAGMENT':
                fragmentProgram += line
        self._vertexString = vertexProgram
        self._fragmentString = fragmentProgram

    def bind(self):
        """Compile/link the program on first use, then make it current.
        On compile/link failure the error is printed and the shader is
        left invalid (fixed pipeline stays active)."""
        if self._program is None and self._vertexString is not None and not legacyMode:
            global contextSource
            self._contextSource = contextSource
            vertexString = self._vertexString
            fragmentString = self._fragmentString
            # Source is cleared so a failed compile is not retried every frame.
            self._vertexString = None
            try:
                vertexShader = shaders.compileShader(vertexString, GL_VERTEX_SHADER)
                fragmentShader = shaders.compileShader(fragmentString, GL_FRAGMENT_SHADER)
                #shader.compileProgram tries to return the shader program as a overloaded int. But the return value of a shader does not always fit in a int (needs to be a long). So we do raw OpenGL calls.
                # This is to ensure that this works on intel GPU's
                # self._program = shaders.compileProgram(self._vertexProgram, self._fragmentProgram)
                self._program = glCreateProgram()
                glAttachShader(self._program, vertexShader)
                glAttachShader(self._program, fragmentShader)
                glLinkProgram(self._program)
                # Validation has to occur *after* linking
                glValidateProgram(self._program)
                if glGetProgramiv(self._program, GL_VALIDATE_STATUS) == GL_FALSE:
                    raise RuntimeError("Validation failure: %s" % (glGetProgramInfoLog(self._program)))
                if glGetProgramiv(self._program, GL_LINK_STATUS) == GL_FALSE:
                    raise RuntimeError("Link failure: %s" % (glGetProgramInfoLog(self._program)))
                glDeleteShader(vertexShader)
                glDeleteShader(fragmentShader)
            except RuntimeError, e:
                print str(e)
                self._program = None
        if self._program is not None:
            shaders.glUseProgram(self._program)

    def unbind(self):
        """Deactivate any shader program (back to fixed function)."""
        shaders.glUseProgram(0)

    def release(self):
        """Free the GL program object; must run on the GL thread/context."""
        if self._program is not None:
            glDeleteProgram(self._program)
            self._program = None

    def setUniform(self, name, value):
        """Set a float or 3x3 numpy.matrix uniform on the bound program."""
        if self._program is not None:
            if type(value) is float:
                glUniform1f(glGetUniformLocation(self._program, name), value)
            elif type(value) is numpy.matrix:
                glUniformMatrix3fv(glGetUniformLocation(self._program, name), 1, False, value.getA().astype(numpy.float32))
            else:
                print 'Unknown type for setUniform: %s' % (str(type(value)))

    def isValid(self):
        """True once the program compiled and linked successfully."""
        return self._program is not None

    def getVertexShader(self):
        # NOTE(review): bind() sets _vertexString to None after compiling,
        # so this returns None once the shader has been bound -- confirm.
        return self._vertexString

    def getFragmentShader(self):
        return self._fragmentString

    def __del__(self):
        # Queue GPU-side cleanup on the context owner; skipped during
        # interpreter shutdown (see _death above).
        global shuttingDown
        if not shuttingDown and self._program is not None:
            self._contextSource.addToReleaseList(self)
class VertexRenderer(object):
    """Draws a vertex array either with legacy client-side arrays or with
    lazily-created VBOs (split into chunks when no index buffer is used).

    The stride arithmetic (2 * 3 * 4 bytes) implies interleaved
    [x, y, z, nx, ny, nz] float data when hasNormals is True.
    """
    def __init__(self, renderType, vertexData, hasNormals=True, indices=None):
        self._renderType = renderType
        self._vertexData = vertexData
        self._indices = indices
        self._hasNormals = hasNormals
        # VBO bookkeeping; created on first render() in non-legacy mode.
        self._buffers = None

    def render(self):
        """Issue the draw call; in VBO mode the buffers are uploaded once
        on the first call and reused afterwards."""
        if legacyMode:
            # Legacy path: client-side vertex arrays straight from numpy data.
            glEnableClientState(GL_VERTEX_ARRAY)
            if self._hasNormals:
                glEnableClientState(GL_NORMAL_ARRAY)
                glVertexPointer(3, GL_FLOAT, 2 * 3 * 4, self._vertexData)
                # Normals start 3 floats into the interleaved data.
                glNormalPointer(GL_FLOAT, 2 * 3 * 4, self._vertexData.reshape(len(self._vertexData) * 6)[3:])
            else:
                glVertexPointer(3, GL_FLOAT, 3 * 4, self._vertexData)
            if self._indices is not None:
                glDrawElements(self._renderType, self._indices.size, GL_UNSIGNED_INT, self._indices)
            else:
                glDrawArrays(self._renderType, 0, len(self._vertexData))
            glDisableClientState(GL_VERTEX_ARRAY)
            glDisableClientState(GL_NORMAL_ARRAY)
        else:
            if self._buffers is None:
                global contextSource
                self._contextSource = contextSource
                self._buffers = []
                if self._indices is not None:
                    # Indexed drawing needs all vertices in one buffer.
                    maxBufferLen = len(self._vertexData)
                    bufferCount = 1
                else:
                    # Non-indexed data is chunked into 30000-vertex VBOs.
                    maxBufferLen = 30000
                    bufferCount = ((len(self._vertexData)-1) / maxBufferLen) + 1
                for n in xrange(0, bufferCount):
                    bufferInfo = {
                        'buffer': glGenBuffers(1),
                        'len': maxBufferLen,
                    }
                    offset = n * maxBufferLen
                    if n == bufferCount - 1:
                        # Last chunk holds the remainder.
                        bufferInfo['len'] = ((len(self._vertexData) - 1) % maxBufferLen) + 1
                    glBindBuffer(GL_ARRAY_BUFFER, bufferInfo['buffer'])
                    glBufferData(GL_ARRAY_BUFFER, self._vertexData[offset:offset+bufferInfo['len']], GL_STATIC_DRAW)
                    self._buffers.append(bufferInfo)
                glBindBuffer(GL_ARRAY_BUFFER, 0)
                if self._indices is not None:
                    self._bufferIndices = glGenBuffers(1)
                    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._bufferIndices)
                    glBufferData(GL_ELEMENT_ARRAY_BUFFER, numpy.array(self._indices, numpy.uint32), GL_STATIC_DRAW)
                    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
            for bufferInfo in self._buffers:
                glBindBuffer(GL_ARRAY_BUFFER, bufferInfo['buffer'])
                glEnableClientState(GL_VERTEX_ARRAY)
                if self._hasNormals:
                    glEnableClientState(GL_NORMAL_ARRAY)
                    glVertexPointer(3, GL_FLOAT, 2 * 3 * 4, c_void_p(0))
                    glNormalPointer(GL_FLOAT, 2 * 3 * 4, c_void_p(3 * 4))
                else:
                    glVertexPointer(3, GL_FLOAT, 3 * 4, c_void_p(0))
                if self._indices is not None:
                    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._bufferIndices)
                    glDrawElements(self._renderType, self._indices.size, GL_UNSIGNED_INT, c_void_p(0))
                    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
                else:
                    glDrawArrays(self._renderType, 0, bufferInfo['len'])
            glBindBuffer(GL_ARRAY_BUFFER, 0)
            glDisableClientState(GL_VERTEX_ARRAY)
            glDisableClientState(GL_NORMAL_ARRAY)

    def release(self):
        """Delete the VBOs; must run on the GL thread/context."""
        if not legacyMode and self._buffers is not None:
            for info in self._buffers:
                glDeleteBuffers(1, [info['buffer']])
            if self._indices is not None:
                glDeleteBuffers(1, [self._bufferIndices])
            self._buffers = None
            self._bufferIndices = None

    def __del__(self):
        # Queue GPU-side cleanup on the context owner; skipped during
        # interpreter shutdown (see _death above).
        global shuttingDown
        if not shuttingDown and self._buffers is not None:
            self._contextSource.addToReleaseList(self)
class GLTexture(object):
    """Lazy-loaded 2D OpenGL texture built from a bitmap resource.

    The GL texture object is created on the first bind() so a context is
    guaranteed to exist.  (Python 2 module: xrange / byte-string data.)
    """
    def __init__(self, filename, filter='linear'):
        self._texture = None          # GL texture id, created lazily
        self._filename = filename     # resource name passed to getBitmap
        self._filter = filter         # 'linear' or nearest filtering
        self._contextSource = None

    def bind(self):
        """Upload the image on first use, then enable and bind the texture."""
        if self._texture is None:
            global contextSource
            self._contextSource = contextSource
            self._texture = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self._texture)
            if self._filter == 'linear':
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            else:
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            img = wx.ImageFromBitmap(getBitmap(self._filename))
            rgbData = img.GetData()
            alphaData = img.GetAlphaData()
            if alphaData is not None:
                # Interleave RGB triples with the alpha plane into RGBA.
                # join() replaces the original quadratic += concatenation.
                data = ''.join(rgbData[i * 3:i * 3 + 3] + alphaData[i]
                               for i in xrange(0, len(alphaData)))
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.GetWidth(), img.GetHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
            else:
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.GetWidth(), img.GetHeight(), 0, GL_RGB, GL_UNSIGNED_BYTE, rgbData)
            glBindTexture(GL_TEXTURE_2D, 0)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self._texture)

    def unbind(self):
        """Unbind any 2D texture."""
        glBindTexture(GL_TEXTURE_2D, 0)

    def release(self):
        """Delete the GL texture object; must run on the GL thread/context."""
        if self._texture is not None:
            glDeleteTextures(1, [self._texture])
            self._texture = None

    def __del__(self):
        # Queue GPU-side cleanup on the context owner; skipped during
        # interpreter shutdown (see _death above).
        # Fix: the original tested self._program -- an attribute GLTexture
        # never defines (copied from GLShader) -- which raised
        # AttributeError in the finalizer and leaked the GL texture.
        global shuttingDown
        if not shuttingDown and self._texture is not None:
            self._contextSource.addToReleaseList(self)
def unproject(winx, winy, winz, modelMatrix, projMatrix, viewport):
    """
    Project a window position back into 3D space (gluUnProject
    reimplementation; some drivers crash in the original glu version).

    :param winx: window-space x coordinate.
    :param winy: window-space y coordinate.
    :param winz: window-space depth in [0, 1].
    :param modelMatrix: 16-element modelview matrix (OpenGL flat order).
    :param projMatrix: 16-element projection matrix (OpenGL flat order).
    :param viewport: (x, y, width, height) viewport rectangle.
    :return: numpy array holding the unprojected x, y, z coordinates.
    """
    npModelMatrix = numpy.matrix(numpy.array(modelMatrix, numpy.float64).reshape((4, 4)))
    npProjMatrix = numpy.matrix(numpy.array(projMatrix, numpy.float64).reshape((4, 4)))
    finalMatrix = numpy.linalg.inv(npModelMatrix * npProjMatrix)
    # Fix: list comprehension instead of map(float, ...); under Python 3
    # map() returns an iterator, which the indexing below would crash on.
    viewport = [float(v) for v in viewport]
    # Normalized device coordinates in [-1, 1], homogeneous w = 1.
    vector = numpy.array([(winx - viewport[0]) / viewport[2] * 2.0 - 1.0,
                          (winy - viewport[1]) / viewport[3] * 2.0 - 1.0,
                          winz * 2.0 - 1.0, 1]).reshape((1, 4))
    vector = (numpy.matrix(vector) * finalMatrix).getA().flatten()
    # Perspective divide by w.
    ret = list(vector)[0:3] / vector[3]
    return ret
def glMultiplyMatrix(matrix):
    """Multiply the current OpenGL matrix by a 3x3 *matrix* embedded in the
    upper-left corner of a 4x4 identity (translation row left untouched)."""
    m = numpy.identity(4, numpy.float64)
    m[:3, :3] = matrix
    glMultMatrixd(m)
dsparrow27/vortex | 19,000,935,333,234 | f4e16c04936e030848023e9a30e961e757d2e67f | b5d1c585c9c601588eb46b6d342a559feea967df | /src/ds/vortex/graphs/__init__.py | af6b94acbf0773337c459ce112d82a086e3bcbad | [
"MIT"
]
| permissive | https://github.com/dsparrow27/vortex | f298467b6ab757ae724c70761580cc19394f8570 | c6624fd91d1a989322197e5ad81af94793d67794 | refs/heads/master | 2021-01-10T12:22:27.036624 | 2016-03-06T11:06:46 | 2016-03-06T11:06:46 | 48,081,530 | 1 | 0 | null | false | 2016-01-09T11:38:22 | 2015-12-16T02:15:31 | 2015-12-16T11:51:45 | 2016-01-09T11:38:22 | 103 | 0 | 0 | 0 | Python | null | null | {
"nodes":{},
"version":"1.0.0",
"name":"testGraph",
"edges":{},
"moduleName":"graph"
} | UTF-8 | Python | false | false | 107 | py | 39 | __init__.py | 37 | 0.448598 | 0.420561 | 0 | 7 | 14.428571 | 24 |
muditagr/twitter-feed-django | 833,223,664,416 | 53216fdbb5235fb680bce190e96bf6529bb4823f | a11b5d79e1308f4d4e12a4044a8c857370d492d5 | /users/serializers.py | 36a6e277754ddfe0bb1f7bc8edee64d64adb412f | []
| no_license | https://github.com/muditagr/twitter-feed-django | 504129f7e9588640179907eee2bc783ab00754c8 | 0a7d8e43d4b76ad9d9a83265249fc4e510510a82 | refs/heads/master | 2020-12-07T14:15:51.587511 | 2020-01-09T06:58:12 | 2020-01-09T06:58:12 | 232,735,861 | 0 | 0 | null | false | 2020-06-06T01:17:05 | 2020-01-09T06:11:58 | 2020-01-09T06:58:24 | 2020-06-06T01:17:03 | 40 | 0 | 0 | 2 | Python | false | false | from rest_framework import serializers
from . import models
class TweetOptionSerializer(serializers.ModelSerializer):
    """Model serializer for TweetOption that always runs in partial mode,
    so callers may submit any subset of the declared fields."""

    def __init__(self, *args, **kwargs):
        # Force partial validation regardless of what the caller passed.
        kwargs['partial'] = True
        super(TweetOptionSerializer, self).__init__(*args, **kwargs)

    class Meta:
        model = models.TweetOption
        fields = ['pk', 'user_id', 'option', 'tweet_id']
narloch/Graphs | 2,559,800,525,317 | 191bd2dfb9168761c022479ff2d16954c53ff8fc | d540849e98af0d29427870f9877b0accd111417a | /atividade2/kruskal.py | 4d5f4050d42206678447ddae7329530d89ea3c4d | []
| no_license | https://github.com/narloch/Graphs | 132241a8c012fa7d3996a7d18a6c4bfc1ab40633 | b6eab060136d86c3b83a0e09522d4042218dbe87 | refs/heads/main | 2023-04-01T11:29:17.525033 | 2021-04-11T21:54:08 | 2021-04-11T21:54:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from parser_arquivo import parser_arquivo
from grafo import Grafo
import math
import sys
with open("grafo.txt") as f:
arquivo_grafo = f.read().splitlines()
vertices, arestas, mapa_arestas = parser_arquivo(arquivo_grafo)
grafo = Grafo(vertices, arestas, mapa_arestas)
arvore = []
s = [[v] for v in range(grafo.qnt_Vertices())]
# pode rir professor, o desespero é o pai da gambiarra
dic_arestas = {grafo.peso(aresta[0], aresta[1]): aresta for aresta in grafo.arestas}
arestas_ordenadas = [dic_arestas[chave] for chave in sorted(dic_arestas.keys())]
for aresta in arestas_ordenadas:
if s[aresta[0]-1] != s[aresta[1]-1]:
arvore = arvore + [aresta]
x = s[aresta[0]-1] + s[aresta[1]-1]
for y in x:
s[y-1] = x
print(sum([grafo.peso(aresta[0], aresta[1]) for aresta in arvore]))
print(*(str(aresta[0]) + '-' + str(aresta[1]) for aresta in arvore), sep= ', ')
| UTF-8 | Python | false | false | 903 | py | 8 | kruskal.py | 8 | 0.660754 | 0.644124 | 0 | 28 | 31.214286 | 84 |
Pjmcnally/algo | 17,549,236,383,757 | b4ed5f6f88d62b61cf3211fd8cb994549c64035f | 23bdedd37d1a3c8eb8a0dda040316b9870c2af10 | /strings/pins/pins_patrick.py | 3a8b2ff275c6ef15b56b585d5d31183442221f92 | []
| no_license | https://github.com/Pjmcnally/algo | 7dd315635ab8e9e92f274e42010854c9fb1babe3 | 63a59b2a14da1e5ecb225d1f2bd9779115ac2a82 | refs/heads/main | 2023-08-03T02:12:07.994820 | 2023-07-31T08:54:42 | 2023-07-31T08:54:42 | 111,017,401 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Authored by Patrick McNally
# Created on 09/25/15
# Requests a number from the user and returns a pronouncable string
# representing the number input.
def break_string(num):
    """Split *num* into its base-100 "digits" -- chunks of at most two
    decimal digits -- most-significant chunk first.

    Example: 54327 -> [5, 43, 27].  Returns [] for 0.

    Parameters
    ---------
    Input:
        num: Int
            A number to be converted to a pronouncable string.
    Output:
        List of the number broken into pieces of 2 digits or smaller.
    """
    chunks = []
    while num:
        chunks.append(num % 100)
        num = int(num // 100)
    # Chunks were collected least-significant first; reverse via slicing.
    return chunks[::-1]
def num_to_char(lst_):
    """Return a pronounceable string built from two-digit chunks.

    Each chunk in [0, 99] maps to one of 20 consonants (chunk // 5)
    followed by one of 5 vowels (chunk % 5), giving a unique two-letter
    syllable per chunk.

    Parameters
    ---------
    Input:
        lst_: List
            A list of numbers 2 digits or less.
    Output:
        A pronouncable string representing the original number entered.
    """
    CONSONANTS = "bcdfghjklmnpqrstvwyz"
    VOWELS = "aeiou"
    syllables = []
    for chunk in lst_:
        quotient, remainder = divmod(chunk, 5)
        syllables.append(CONSONANTS[quotient] + VOWELS[remainder])
    return "".join(syllables)
def convert_pin(num):
    """Return a pronounceable string for a positive integer PIN.

    Each two-digit chunk (00-99) maps to one of 20 consonants followed by
    one of 5 vowels (20 * 5 = 100 combinations), so any number becomes a
    string of pronounceable syllables.

    Returns the ValueError *class* (not an instance) on any invalid input;
    this odd contract is preserved because the module-level asserts and
    callers compare against it with ==.

    Parameters
    ---------
    Input:
        num: Int
            A number to be converted to a pronouncable string.
    Output:
        A pronouncable string, or ValueError for invalid input.
    """
    try:
        if num <= 0:
            return ValueError
        return num_to_char(break_string(num))
    # Narrowed from a bare ``except:`` so genuine bugs and
    # KeyboardInterrupt/SystemExit are no longer silently swallowed.
    # TypeError covers non-numeric input (None, strings, ...).
    except (TypeError, ValueError):
        return ValueError
# Smoke tests executed at import time; they also pin convert_pin's
# return-ValueError-class contract for invalid input.
assert break_string(4327) == [43, 27]
assert break_string(1298) == [12, 98]
assert break_string(54327) == [5, 43, 27]
assert num_to_char([43, 27]) == "lohi"
assert num_to_char([12, 98]) == "dizo"
assert num_to_char([5, 43, 27]) == "calohi"
assert convert_pin(None) == ValueError
assert convert_pin('absd')== ValueError
assert convert_pin(0) == ValueError
assert convert_pin(4327) == "lohi"
assert convert_pin(1298) == "dizo"

if __name__ == '__main__':
    # NOTE(review): eval() on raw user input executes arbitrary code;
    # int(input(...)) would be safer -- left unchanged here.
    num = eval(input("What number would you like to make pronouncable? "))
    print("The pronouncable version of your number is :", convert_pin(num))
| UTF-8 | Python | false | false | 2,764 | py | 79 | pins_patrick.py | 68 | 0.633502 | 0.607091 | 0 | 104 | 25.557692 | 76 |
protasovse/mlo | 14,181,982,013,708 | d319aee5127b82032a767b283524d85f61919ec1 | b17a3a61c0d86e8011fd39c9aabd5aa65c591998 | /project/apps/sxgeo/admin.py | 0dda61bd0dcdfb6ef89f79401a704cd769ca0229 | []
| no_license | https://github.com/protasovse/mlo | 9e8b78cd07b50521e98521ba3339fe40b0b4023a | b288dadab29dbd53e2180d4567f7ac745cf6852b | refs/heads/master | 2020-04-13T11:45:19.195875 | 2018-12-20T05:59:50 | 2018-12-20T05:59:50 | 163,182,777 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from apps.sxgeo.models import Cities
# @admin.register(Cities)
# class CitiesAdmin(admin.ModelAdmin):
# search_fields = ['name_ru', 'name_en']
| UTF-8 | Python | false | false | 183 | py | 344 | admin.py | 240 | 0.726776 | 0.726776 | 0 | 8 | 21.875 | 44 |
thirumald/ansible-cisco-conf | 12,670,153,531,609 | 999504e593ac49d11e218912176d8928e043796b | c472d19a8e10c2671dcbccd4003be76040d591b1 | /vlans.py | 3a5c72cb1c5e65746d492c0aed8daafb38b11d7d | []
| no_license | https://github.com/thirumald/ansible-cisco-conf | 64f32bc5c222505ec1e19ddd4e7339f57aad86e2 | 5382355d195979392c20461a0454fb474d2798da | refs/heads/master | 2021-06-01T13:18:20.906291 | 2016-06-21T12:29:53 | 2016-06-21T12:29:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
import json
import csv
import sys
class Inventory(object):
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
def host_list(self):
# Define list of hosts and list of hostvars
h_list=[]
h_hosts_raw=[]
h_var_total = {}
# Open CSV as dictionary
vlan_file = open('vlans.csv')
vlan_dict = csv.DictReader(vlan_file)
# Iterate
for row in vlan_dict:
h_hosts_raw.append(row["hostname"])
h_name = row["hostname"]
# delete variables not to be output to YAML
del row["hostname"]
# delete null values
for k, v in row.items():
if v == '':
del row[k]
row = {"vlans": row}
var = {h_name : row}
h_var_total.update(var)
# remove duplicates from list of hosts
h_list = list(set(h_hosts_raw))
# return stdout in ansible compliant JSON output
return {
"group": {
"hosts": h_list
},
"_meta": {
"hostvars": h_var_total
}
}
# Empty inventory for testing.
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
def __init__(self):
self.inventory = {}
self.read_cli_args()
# Called with `--list`.
if self.args.list:
self.inventory = self.host_list()
json.dump(self.inventory, sys.stdout)
# Called with `--host [hostname]`.
elif self.args.host:
# Not implemented, since we return _meta info `--list`.
self.inventory = self.empty_inventory()
# If no groups or vars are present, return an empty inventory.
else:
self.inventory = self.empty_inventory()
if __name__ == '__main__':
    # Instantiating runs the tool: parses CLI args and, for --list, prints
    # the inventory JSON to stdout.
    Inventory()
# EXAMPLE JSON OUTPUT IN ANSIBLE INVENTORY FORMAT
# {"group": {
# "hosts": ["ACCESS-SW1", "ACCESS-SW3", "ACCESS-SW2"]
# }, "_meta": {
# "hostvars": {
# "ACCESS-SW1": {
# "vlans": {
# "10": "data-10",
# "20": "data-20",
# "40": "voice",
# "30": "data-30"
# }
# },
# "ACCESS-SW3": {
# "vlans": {
# "10": "data-10",
# "20": "data-20",
# "40": "voice",
# "30": "data-30"
# }
# },
# "ACCESS-SW2": {
# "vlans": {
# "10": "data-10",
# "20": "data-20",
# "40": "voice",
# "30": "data-30"
# }
# }
# }
# }}
| UTF-8 | Python | false | false | 2,968 | py | 26 | vlans.py | 7 | 0.438679 | 0.422507 | 0 | 113 | 25.265487 | 70 |
spikems/qa | 1,073,741,864,852 | 2a1702af97261efabd523c6a383002ddb355416b | 583453132584363daeb80344fc60f2855991199e | /chatrobot/chatrobot/common/wcut/jieba/dict_norm.py | 9bf388b26e8699be8ed8044a28269715d725d9f0 | []
| no_license | https://github.com/spikems/qa | efcfc3f11f7af7b72fd497a3aa0d1b9eab6aec7f | db5a91083a55e4118b648d62fd0806eb33d1bba3 | refs/heads/master | 2020-03-20T08:46:41.135752 | 2018-06-14T06:57:24 | 2018-06-14T06:57:24 | 137,318,682 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #encoding:utf-8
import os,sys
cur_path = os.path.dirname(os.path.abspath(__file__))
jieba_path = '%s/../' % cur_path
sys.path.insert(0, jieba_path)
import jieba
from jieba.norm import norm_cut, norm_seg
from jieba.norm import load_industrydict
import datetime
import random
class CutWord:
    """Wrapper around jieba segmentation with optional user dictionaries.

    Merges caller-supplied special words into jieba's base dictionary
    (written to a uniquely named temp file) and loads per-industry
    dictionaries before segmenting.
    """
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # prefer None-and-replace. Left unchanged here.
    def __init__(self, special_words = [], industrys = []):
        # Base dictionary shipped next to this module.
        self.f_inner_dict = '%s/dict.txt' % os.path.dirname(os.path.abspath(__file__))
        self.d_special_words = special_words
        self.industrys = industrys
        # Defaults to the base dictionary; replaced by a temp file when
        # special words are supplied.
        self.f_special_dict = self.f_inner_dict
        self.__load_dictionary()
    def __load_dictionary(self):
        """Load industry dictionaries, then build a merged custom dictionary."""
        load_industrydict(self.industrys)
        if len(self.d_special_words) == 0:
            return
        # Start from every line of the base dictionary...
        special_dict = []
        with open(self.f_inner_dict, 'r') as fr:
            for line in fr:
                special_dict.append(line)
        # ...then append each special word as "<word> 10000 <POS>", with the
        # part-of-speech tag defaulting to 'n' when none was given.
        for line in self.d_special_words:
            fs = line.strip().split()
            if len(fs) == 1:
                fs.append('n')
            line = "%s 10000 %s\n" % (fs[0], fs[1])
            special_dict.append(line)
        # Write the merged dictionary to a unique temp file and point jieba at it.
        self.f_special_dict = '%s/temp_dict/dict_%s_%s.txt' % (cur_path, random.randint(0, 100000), str(datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
        fw = open(self.f_special_dict, 'w')
        for line in special_dict:
            fw.write(line)
        fw.close()
        if os.path.exists(self.f_special_dict):
            jieba.set_dictionary(self.f_special_dict)
    def cut_seg(self, sentence = ''):
        '''
        Segment *sentence* with jieba's norm_seg and return the tagged words.
        (Special words are supplied at construction, e.g. ['美丽 a', '转发 v'];
        industry dictionary ids: 2 = automotive, 7 = beauty, 0 = new words.)
        :return: iterable of tagged word objects (.word / .flag)
        '''
        words = norm_seg(sentence)
        return words
    def release(self):
        """Delete the generated temp dictionary, if any."""
        # NOTE(review): this condition looks inverted — a temp dict only
        # exists when special words WERE supplied, but cleanup only runs when
        # they were NOT, so the temp file is never removed. Confirm intent.
        if self.f_inner_dict != self.f_special_dict and not len(self.d_special_words):
            cmd = 'rm -rf %s' % self.f_special_dict
            os.system(cmd)
if __name__ == '__main__':
    # Demo: segment a sample sentence with two user-supplied words and the
    # automotive (2) industry dictionary.
    #CutWord().cut()
    sentence = '我是认识石墨烯和凱特琳Chery东方之子'
    special_words = ['石墨烯 n', '凱特琳 n']
    industrys = [2]
    cw = CutWord( special_words = special_words, industrys = industrys)
    words = cw.cut_seg(sentence = sentence)
    for word in words:
        # Python 2 print statement: this module is Python 2 only as written.
        print word.word , word.flag
    cw.release()
| UTF-8 | Python | false | false | 2,356 | py | 38 | dict_norm.py | 34 | 0.564035 | 0.554386 | 0 | 76 | 28.763158 | 154 |
gavz/CTF-Writeups-1 | 19,275,813,236,610 | 1eaed0794b8a81b4cacaea89eb4b786ee2beaf81 | f0a229be0ccd235c5285d4fb8e96738492b1daa8 | /cyberschool/rev/3/3.py | 684715fe9f87ec720222e4bbb328a206aac33552 | []
| no_license | https://github.com/gavz/CTF-Writeups-1 | fe37e63b43712b5c5dce08c36c9fc942c172de51 | 829c35b38fe7c2ff2466bde49dec511577684d3d | refs/heads/master | 2022-12-23T02:51:34.143652 | 2020-09-20T17:41:07 | 2020-09-20T17:41:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | flag = list(map(lambda x: int(x, 16), '''94 78 DE C2 D0 FC AE 32 6B B3 3E 1E D0 5B 3B C6 F6 26 06 8A 3C 42 F4 07 11 76 56 E1 11 F1 50 B5 13 E4 96 C6 74 F8 D8 08 41 9F DF 35 00'''.split()))
# Brute-force the flag: for each target byte, try every printable ASCII
# character, apply the challenge's encoding (one transform for odd character
# codes, another for even, with the previous encoded byte chained in), and
# keep the character whose encoding matches the dumped byte.
_1 = lambda j: ((j * 0x6d) & 0xff) ^ 0xef   # transform used for even char codes
_2 = lambda j: ((j ^ 0xff) * 0x6b) & 0xff   # transform used for odd char codes
dl = 0     # previous encoded byte, chained into the next position
out = ''   # recovered plaintext
for i in range(len(flag) - 1):   # final 0x00 byte of the dump is skipped
    for j in range(32, 127):     # printable ASCII candidates
        if j & 0b1:
            t = (_2(j) + dl) & 0xff
        else:
            t = (_1(j) + dl) & 0xff
        if t == flag[i]:
            out += chr(j)
            dl = t
            break
print(out)
ob2410/Esercizi_SO | 884,763,288,796 | 4f7a5896b6e016d2a138fd648d1e26b1966d5744 | a6b1e93a33f507b97303552a2a6ae586c3a0dbd4 | /ProvaPratica/2014-09-25/es3.py | 3da13149a9f73d6e4f6acfa4cdf2ff738f91446c | []
| no_license | https://github.com/ob2410/Esercizi_SO | a83acc1be0195b9c3e55391ecc10d44e6162f83d | 27a4feb15b4f3725ad3d498300857ec5b7f22bcc | refs/heads/master | 2021-01-17T19:13:14.859501 | 2016-07-29T06:15:10 | 2016-07-29T06:15:10 | 64,322,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Scrivere un programma python o uno scrip bash che faccia un backup storico di un file.
# backn file n
# deve mantenere n versioni del file specificato. n>2
# Esempio:
# backn miofile 10
# se esiste miofile.9 deve essere rinominato in miofile.10
# se esiste miofile 8 deve essere rinominato in miofile 9
# e cosi' via fino a miofile.2, rinominato miofile 3.
# ora se miofile.1 ha lo stesso contenuto di miofile.3, miofile diventa un link fisico a miofile.2
# miofile viene copiato in miofile.1
| UTF-8 | Python | false | false | 496 | py | 42 | es3.py | 9 | 0.768145 | 0.739919 | 0 | 10 | 48.6 | 98 |
LLT1/SelfNormalizingFlows | 12,695,923,375,353 | 91e31375927d7ebae5a97e1ea1fb4511fa8bc2af | c902925132bc97117945ea46e8241e111a6218b7 | /snf/experiments/toydensity.py | f2f2be9b39260902a9f51a34503b401f1e7b9562 | [
"MIT"
]
| permissive | https://github.com/LLT1/SelfNormalizingFlows | 7b1b620289f1550ea4aad4bf0d08944d39c4ad97 | 3e5b606968275d5a16689e654d6ad10b6c064413 | refs/heads/master | 2023-01-09T19:20:21.688731 | 2020-11-17T09:26:33 | 2020-11-17T09:26:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim.lr_scheduler import StepLR
from snf.datasets.toy_density_data import ToyDensity
from snf.layers.flowsequential import FlowSequential
from snf.layers.selfnorm import SelfNormFC
from snf.layers.activations import SmoothLeakyRelu
from snf.train.losses import NegativeGaussianLoss
def create_model(num_layers=100):
    """Build a flow of *num_layers* 2x2 self-normalising FC layers.

    A SmoothLeakyRelu activation sits between consecutive layers (none
    after the last), and the flow is wrapped with a Gaussian base loss.
    """
    layers = []
    for index in range(num_layers):
        layers.append(SelfNormFC(2, 2, bias=True))
        is_last = (index == num_layers - 1)
        if not is_last:
            layers.append(SmoothLeakyRelu(alpha=0.3))
    return FlowSequential(NegativeGaussianLoss((2,)), *layers)
def load_data(dataset_name, **kwargs):
    """Construct the toy density dataset and a DataLoader over it.

    Extra keyword arguments (e.g. batch_size) go straight to the DataLoader.
    """
    dataset = ToyDensity(dataset_name)
    loader = DataLoader(dataset, **kwargs)
    return dataset, loader
def main():
    """Train a self-normalising flow on the two-moons toy density.

    Fixes over the original training loop:
    - ``optimizer.step()`` now runs once per batch. It previously ran once
      per epoch, after the batch loop, while ``zero_grad()`` ran inside the
      loop — so every batch's gradients except the last were discarded.
    - the epoch reconstruction-loss accumulator is no longer overwritten by
      the per-batch tensor returned from ``add_recon_grad()`` (the original
      then added that tensor's ``.item()`` to itself).
    """
    dataset_name = "moons"
    dataset, dataloader = load_data(dataset_name, batch_size=100)
    model = create_model()
    model.to('cuda')
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)
    for e in range(6_000):
        total_loss = 0
        total_recon_loss = 0
        num_batches = 0
        for x, _ in dataloader:
            optimizer.zero_grad()
            x = x.float().to('cuda')
            out = -model(x, compute_expensive=False)
            lossval = out.sum() / len(x)
            lossval.backward()
            # For SNF, add the reconstruction gradient; the call returns the
            # batch reconstruction loss tensor.
            batch_recon_loss = model.add_recon_grad()
            optimizer.step()
            total_loss += lossval.item()
            total_recon_loss += batch_recon_loss.item()
            num_batches += 1
        print(f"Epoch {e}: Total_loss: {total_loss / num_batches}, "
              f"Total Recon: {total_recon_loss / num_batches}")
        scheduler.step()
        if e % 5 == 0:
            print(f"Epoch {e}: Total_loss: {total_loss / num_batches}, "
                  f"Total Recon: {total_recon_loss / num_batches}")
            dataset.plot(model, f'{dataset_name}_epoch{e}',
                         'cuda', dir='ToyDensitySamples')
ujjalgoswami/ordermanagementcustomdashboard | 16,166,256,910,113 | 4792bc01256d5f855dc40f677a4d7431c8069835 | 12f18662719d04d2404396b9059b60525528f557 | /findsportsordermanagement-master/stockupdatefiles/Liive.py | a91bda0484868626ec2fe12879ebdb8323702f3e | []
| no_license | https://github.com/ujjalgoswami/ordermanagementcustomdashboard | 0bf4a5770d1913b257a43858d778e630e671a342 | acd18510b0934601d30bd717ea4b3fbb61ecfb5c | refs/heads/master | 2021-02-04T10:04:27.380674 | 2020-02-28T01:37:35 | 2020-02-28T01:37:35 | 243,653,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import CONFIG_STOCKUPDATE as config_stockudpate
#!/usr/bin/env python
isProduction=config_stockudpate.isProduction
import numpy as np
import pandas as pd
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
import json
from selenium.webdriver.support.ui import WebDriverWait
import math
import re
import os.path
from datetime import datetime
import sys
import requests
from selenium.webdriver.chrome.options import Options
import requests
from datetime import datetime
import os
import numpy as np
import pandas as pd
# In[61]:
# Session cookies captured from a logged-in Liive B2B portal session.
# NOTE(review): these are hard-coded secrets/session state and will expire;
# they (and the API key below) belong in configuration, not source.
cookies = {
    'ASPSESSIONIDSQTSSTTT': 'ECIKCDNCNPJIDOOBIFDNDPLB',
    'idLiiveVision2AU%5FOrders': '16241',
    'LiiveVision2AU%5FCurrencyId': '1',
    'LiiveVision2AU%5FOrdersSession': '359963269',
    'LiiveVision2AU%5FMembersAuth': 'A002230BA0A07E12A05C69A0A06B',
    'blnMobileView': 'false',
    'LiiveVision2AU%5FShoppingDescription': '36468+Item%28s%29',
    'CartItems': '36468',
    'strViewStyle': 'List',
    'IsCartPopulated': '1',
    'LiiveVision2AU%5FLastViewed': '916%2C1035%2C919%2C918%2C916%2C999%2C914%2C1000%2C999%2C984%2C985%2C1054%2C1053%2C983%2C1046%2C1043%2C1042%2C1041%2C1040%2C',
}
# Browser-like headers for the portal's cart AJAX endpoint.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:67.0) Gecko/20100101 Firefox/67.0',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.5',
    'Referer': 'http://sales.liivevision.com/list.asp?idWebPage=484890&CATID=64&ListOptions=Submit&strViewStyle=List&page=1&back=1',
    'X-Requested-With': 'XMLHttpRequest',
    'Connection': 'keep-alive',
}
# Credentials/headers for the findsports.com.au Neto product API.
product_headers = {
    'NETOAPI_ACTION': "GetItem",
    'NETOAPI_USERNAME': "API-User-Product",
    'NETOAPI_KEY': "v0fmsHHYPqfq99lFnPJ1kQbIgynkbLJq",
    'Accept': "application/json",
    'Content-Type': "application/javascript",
    'cache-control': "no-cache",
    'Postman-Token': "2473156a-3bcc-4a64-8079-04c3a395b5ea"
}
# Neto API endpoint used by api_product_response().
url = "https://www.findsports.com.au/do/WS/NetoAPI"
def api_product_response(dict_filter, List_of_OutputSelector=None, new_headers=None):
    """POST a GetItem query to the Neto API and return the decoded JSON.

    :param dict_filter: base filter criteria (e.g. {'PrimarySupplier': ...}).
        A copy is taken so the caller's dict is no longer mutated.
    :param List_of_OutputSelector: list of fields to request in the response.
    :param new_headers: optional replacement for the default product_headers.
        (Previously any non-None value raised NameError, because ``header``
        was only assigned in the ``new_headers is None`` branch.)
    :return: parsed JSON response as a dict.
    """
    query_filter = dict(dict_filter)  # don't mutate the caller's dict
    query_filter['OutputSelector'] = List_of_OutputSelector
    # Mark matched items as exported so they are not re-processed.
    query_filter["UpdateResults"] = {"ExportStatus": "Exported"}
    payload = json.dumps({'Filter': query_filter})
    header = product_headers if new_headers is None else new_headers
    response = requests.request("POST", url, data=payload, headers=header)
    return json.loads(response.text)
# Scrape product links and SKUs from the Liive B2B portal with headless
# Chrome, then probe each product's available stock by adding 1000 units to
# the cart and reading back the clamped quantity. The whole flow retries up
# to 6 times on any failure.
start = time.time()
chrome_options = Options()
chrome_options.add_argument("--headless")
if(isProduction):
    driver = webdriver.Chrome(chrome_options=chrome_options)
else:
    driver = webdriver.Chrome(config_stockudpate.downloads_folder+'chromedriver',chrome_options=chrome_options)
error_found=True
max_count=0
while(error_found==True and max_count<=5):
    try:
        # Log in to the supplier portal.
        # NOTE(review): credentials are hard-coded in source; move to config.
        driver.get("http://sales.liivevision.com/loginwcs01484893/login.html")
        username=driver.find_element_by_xpath('//*[@id="body-container"]/form/table[1]/tbody/tr/td/table/tbody/tr[2]/td[2]/input')
        password=driver.find_element_by_xpath('//*[@id="body-container"]/form/table[1]/tbody/tr/td/table/tbody/tr[3]/td[2]/input')
        username.send_keys("orders@findsports.com.au")
        password.send_keys("finds3175")
        driver.find_element_by_xpath('//*[@id="body-container"]/form/table[1]/tbody/tr/td/table/tbody/tr[4]/td[2]/input[2]').click()
        page=1;
        db=list();
        # Walk the first four listing pages, collecting one dict per product
        # row: its detail link and SKU cell.
        while page <=4:
            print(page)
            driver.get("http://sales.liivevision.com/list.asp?idWebPage=484890&CATID=64&ListOptions=Submit&strViewStyle=List&page="+str(page)+"&back=1")
            count=1;
            trs = driver.find_elements_by_xpath("/html/body/div[2]/div/div/table/tbody/tr/td[2]/table[5]/tbody/tr/td/table/tbody/tr")
            for tr in trs:
                count+=1;
                try:
                    a=driver.find_element_by_xpath('//*[@id="body-container"]/table[5]/tbody/tr/td/table/tbody/tr['+str(count)+']/td[2]/a').get_attribute("href")
                    sku=driver.find_element_by_xpath("/html/body/div[2]/div/div/table/tbody/tr/td[2]/table[5]/tbody/tr/td/table/tbody/tr["+str(count)+"]/td[3]").get_attribute("innerHTML");
                    temp=dict();
                    temp['link']=a
                    temp['SKU']=sku
                    db.append(temp)
                except:
                    # NOTE(review): bare except — rows that fail to parse are
                    # silently skipped after a short pause.
                    time.sleep(1)
            page+=1;
        count=0
        # NOTE(review): dead assignment — `d` is immediately rebound by the
        # loop below (and this raises IndexError if fewer than 2 rows exist).
        d=db[1]
        count+=1
        for d in db:
            count+=1
            # Extract idWebPage and product ID from the javascript: link.
            for s in d['link'].replace("javascript:OpenProductDetails('/prod.asp?","").replace(");void(0)","").split("&"):
                if 'idWebPage' in s:
                    idWebPage=s.replace("idWebPage=","")
                if "CATID=" not in s and "SID=" not in s and "ID=" in s:
                    ID=s.replace("ID=","")
            # Ask the cart to hold 1000 units; the portal clamps the cart to
            # available stock, so the echoed totalItems is the stock level.
            params = (
                ('idWebPage', idWebPage),
                ('Type', 'MiniCart'),
                ('ID', ID),
                ('SubmitType', 'UpdateCart'),
                ('strOrderType', ''),
                ('intQty', '1000'),
            )
            data=""
            response = requests.get('http://sales.liivevision.com/prod.asp', data=data,headers=headers, params=params, cookies=cookies)
            content=str(response.content)
            qty=content.split("<script> totalItems = ")[1].replace(";</script>'","")
            d["QTY"]=qty
            print(d)
            print(qty)
            time.sleep(1)
            # Reset the cart quantity back to 0 so the session stays clean.
            params = (
                ('idWebPage', idWebPage),
                ('Type', 'MiniCart'),
                ('ID', ID),
                ('SubmitType', 'UpdateCart'),
                ('strOrderType', ''),
                ('intQty', '0'),
            )
            response = requests.get('http://sales.liivevision.com/prod.asp', data=data,headers=headers, params=params, cookies=cookies)
            print(response.content)
            message = "Scrapped Products :" + str(count)
            sys.stdout.write("\r" + message)
            sys.stdout.flush()
        error_found=False
    except:
        # NOTE(review): bare except — any error restarts the whole scrape.
        print("Error!")
        error_found=True
        max_count+=1
# In[62]:
# Collect the scraped rows (link / SKU / QTY) into a DataFrame for lookups.
df = pd.DataFrame(db)
# In[63]:
# NOTE(review): this rebinds `datetime` (imported above as the class via
# `from datetime import datetime`) to the module, which is what makes
# `datetime.date.today()` below work.
import datetime
supplier_name="Liive"
list_of_skus=list(df['SKU'])
list_of_new_qtys=list(df['QTY'])
if not(error_found):
    print("SKUS Fetched ",len(list_of_skus))
    # Pull this supplier's current quantities from the Neto API.
    list_of_qty_sku_dict=api_product_response({'PrimarySupplier':supplier_name},['AvailableSellQuantity','SKU','IsActive'],None)['Item']
    print(len(list_of_qty_sku_dict))
    columns=['Active','SKU','Existing QTY','New Qty']
    df_history = pd.DataFrame(columns=columns)
    history_list_of_skus=[]
    history_list_of_existing_qty=[]
    history_list_of_new_qty=[]
    history_list_of_active=[]
    list_of_oos_skus=[]
    list_of_skus_back_in_stock=[]
    count=0
    # Compare each Neto SKU's existing quantity with the scraped quantity.
    for index in range(0,len(list_of_qty_sku_dict)):
        existing_qty=list_of_qty_sku_dict[index]['AvailableSellQuantity']
        active=list_of_qty_sku_dict[index]['IsActive']
        sku=list_of_qty_sku_dict[index]['SKU']
        try:
            new_qty=df.loc[df['SKU'] == sku]['QTY'].values[0]
        except:
            # SKU not found in the scrape -> treat as out of stock.
            new_qty=0
        if(new_qty==0):
            list_of_oos_skus.append(sku)
        elif(int(existing_qty)<10 and new_qty==10):
            # NOTE(review): scraped QTY values are strings elsewhere; confirm
            # this `== 10` comparison ever matches.
            list_of_skus_back_in_stock.append(sku)
        history_list_of_skus.append(sku)
        history_list_of_existing_qty.append(existing_qty)
        history_list_of_new_qty.append(new_qty)
        history_list_of_active.append(active)
    df_history['SKU']=history_list_of_skus
    df_history['Existing QTY']=history_list_of_existing_qty
    df_history['New Qty']=history_list_of_new_qty
    df_history['Active']=history_list_of_active
    current_date = datetime.date.today()
    # Write a dated history CSV and upload it to S3 (paths differ per env).
    if(isProduction):
        #Backup
        file_name="/home/ubuntu/findsportsordermanagement/static/datafiles/stockupdate/history/"+str(supplier_name)+"_STOCK_UPDATE_"+str(current_date)+".csv"
        df_history.to_csv(file_name,index=False)
        config_stockudpate.sendfiletos3(file_name,str(supplier_name)+"_STOCK_UPDATE_"+str(current_date)+".csv")
    else:
        #Backup
        file_name="/Users/ujjalgoswami/Desktop/django/django1env/projects/findsportsordermanagement/static/datafiles/stockupdate/history/"+str(supplier_name)+"_STOCK_UPDATE_"+str(current_date)+".csv"
        df_history.to_csv(file_name,index=False)
        config_stockudpate.sendfiletos3(file_name,str(supplier_name)+"_STOCK_UPDATE_"+str(current_date)+".csv")
    print("Total products Back in Stock:",len(list_of_skus_back_in_stock))
    print("Potential new products found while scrapping:",len(list_of_skus)-len(list_of_qty_sku_dict))
    print("Start Time:",start)
    done = time.time()
    print("End Time:",done)
    elapsed = done - start
    print("Time Taken:",elapsed)
    final_run_status="Success"
    comments="Run Passed"
else:
    #Something went wrong!
    final_run_status="Failed"
    comments="Run Failed"
    elapsed=0
    oos_items=0
# In[64]:
# Report the run's statistics to the dashboard endpoint.
# NOTE(review): on the failure path (error_found True) the history lists,
# current_date and list_of_oos_skus below were never assigned, so this
# section raises NameError instead of reporting the failed run — confirm
# and guard if failure reporting is required.
# if(isProduction):
link2="http://ec2-18-189-22-237.us-east-2.compute.amazonaws.com/stockupdate/setstockupdatestats"
# else:
# link2="http://127.0.0.1:8000/stockupdate/setstockupdatestats"
API_ENDPOINT = link2
history_list_of_existing_qty = [int(i) for i in history_list_of_existing_qty]
#Getting the number of oos products currently in Neto for this supplier
prev_in_stock=sum(i > 0 for i in history_list_of_existing_qty)
history_list_of_new_qty=[int(i) for i in history_list_of_new_qty]
#Getting the number of oos products after the stock update
new_in_stock=sum(i > 0 for i in history_list_of_new_qty)
if(final_run_status=="Success"):
    oos_items=len(list_of_oos_skus)
else:
    oos_items=0
    elapsed=""
stats_dict={
    "supplier_name":str(supplier_name),
    "run_date":str(current_date),
    "run_status": str(final_run_status),
    "oos_items":str(oos_items),
    "prev_instock":str(prev_in_stock),
    "new_instock":str(new_in_stock),
    "stock_update_approved":"False",
    "comments":str(comments),
    "time_taken":str(elapsed)[0:8]
}
data={'apikey':'findsportsapikey12345','stats':str(stats_dict)}
r = requests.post(url = API_ENDPOINT,data=json.dumps(data))
pastebin_url = r.text
print(pastebin_url)
# In[ ]:
| UTF-8 | Python | false | false | 10,571 | py | 301 | Liive.py | 177 | 0.631255 | 0.6007 | 0 | 341 | 29.98827 | 199 |
varuntirthani/Movie-Recommendation-AIR | 2,310,692,432,314 | 05637e243d6c108cf7be577d5675accd1a369bf8 | 528210d0e4b7334e4ae371283062840a4c7fc5e0 | /search.py | 4951d1a1c368d5b95bf0cca7f9ccbf52cdfbba91 | [
"Apache-2.0"
]
| permissive | https://github.com/varuntirthani/Movie-Recommendation-AIR | 1cb1c81d55f707a4877bccb383893e0558c5535c | bbc4d0fd119dd459d8bbea871aa793943f92161a | refs/heads/main | 2023-04-08T14:42:05.435242 | 2021-04-16T11:47:41 | 2021-04-16T11:47:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import json
import boolean
from nltk.stem import PorterStemmer
import string
# All ASCII letters, used for single-character membership tests in Execute.
letters = list(string.ascii_lowercase)
# extend(), not append(): append() added the uppercase alphabet as ONE nested
# list, so membership tests like `'A' in letters` always failed.
letters.extend(string.ascii_uppercase)
stemmer=PorterStemmer()
class Conversion:
    """Shunting-yard-style converter from infix boolean queries to postfix.

    Precedence: ~ (NOT) binds tightest, then & (AND), then | (OR).
    Operators live on ``self.array``; converted tokens accumulate in
    ``self.output`` (note: the accumulator persists across calls, so use a
    fresh instance per expression).
    """
    def __init__(self, capacity):
        self.top = -1                    # stack-top index (-1 == empty)
        self.capacity = capacity         # kept for API compatibility; unused
        self.array = []                  # operator stack
        self.output = []                 # postfix tokens collected so far
        self.precedence = {'|': 1, '&': 2, '~': 3}
    def isEmpty(self):
        """True when the operator stack holds no items."""
        return self.top == -1
    def peek(self):
        """Return the stack top without removing it."""
        return self.array[-1]
    def pop(self):
        """Pop and return the stack top, or the sentinel "$" when empty."""
        if self.isEmpty():
            return "$"
        self.top -= 1
        return self.array.pop()
    def push(self, op):
        """Push *op* onto the operator stack."""
        self.top += 1
        self.array.append(op)
    def isOperand(self, ch):
        """Operands are alphabetic characters (query-term letters)."""
        return ch.isalpha()
    def notGreater(self, i):
        """True when *i*'s precedence is <= that of the stack top."""
        try:
            return self.precedence[i] <= self.precedence[self.peek()]
        except KeyError:
            # Parentheses (or any non-operator) have no precedence entry.
            return False
    def infixToPostfix(self, exp):
        """Convert infix *exp* to postfix and return the joined string.

        Returns -1 on a malformed expression (defensive; normally
        unreachable given the drain loop above it).
        """
        for token in exp:
            if self.isOperand(token):
                self.output.append(token)
            elif token == '(':
                self.push(token)
            elif token == ')':
                # Drain operators back to the matching '('.
                while not self.isEmpty() and self.peek() != '(':
                    self.output.append(self.pop())
                if not self.isEmpty() and self.peek() != '(':
                    return -1
                self.pop()   # discard the '(' (or the "$" sentinel)
            else:
                # Operator: first pop everything of >= precedence.
                while not self.isEmpty() and self.notGreater(token):
                    self.output.append(self.pop())
                self.push(token)
        while not self.isEmpty():
            self.output.append(self.pop())
        return "".join(self.output)
class Parse():
    """Normalises a raw boolean query and converts it to postfix form."""
    def __init__(self, query):
        self.query = query
    def transform(self):
        """Append the 'sep' marker to every token that is not a boolean keyword."""
        tokens = self.query.split(" ")
        keywords = ("and", "or", "not")
        tokens = [tok if tok in keywords else tok + 'sep' for tok in tokens]
        self.query = " ".join(tokens)
    def parser(self):
        """Parse the query with boolean.py, then return it in postfix notation."""
        self.transform()
        algebra = boolean.BooleanAlgebra()
        try:
            parsed = algebra.parse(self.query)
        except SyntaxError:
            raise SyntaxError("Invalid syntax in query")
        self.query = str(parsed)
        self.convert_to_postfix()
        return self.query
    def convert_to_postfix(self):
        """Run the infix query through Conversion, storing the postfix result."""
        converter = Conversion(len(self.query))
        self.query = converter.infixToPostfix(self.query)
class Execute:
    """Evaluates a postfix boolean query against the inverted index.

    Operand tokens push their posting lists onto a stack; the operators
    &, | and ~ pop operand lists and push the combined result.
    """
    def __init__(self, capacity, vocab):
        self.top = -1              # stack-top index (-1 == empty)
        self.capacity = capacity   # kept for API symmetry with Conversion; unused
        self.array = []            # operand stack (lists of movie titles)
        self.vocab = vocab         # term -> posting-list mapping
    def isEmpty(self):
        return True if self.top == -1 else False
    def peek(self):
        return self.array[-1]
    def pop(self):
        # Returns the sentinel "$" when the stack is empty.
        if not self.isEmpty():
            self.top -= 1
            return self.array.pop()
        else:
            return "$"
    def push(self, op):
        self.top += 1
        self.array.append(op)
    def exec(self, exp):
        op = Operators(self.vocab)
        # Operand tokens were suffixed with 'sep' by Parse.transform, so
        # splitting on it separates terms from runs of operator characters.
        exp = exp.split("sep")
        try:
            # When the third fragment starts with an operator immediately
            # followed by a letter, move that operator to the end of the next
            # fragment — presumably so it is applied after its operands.
            # NOTE(review): `letters` as built above only contains lowercase
            # characters individually (the uppercase set was append()ed as a
            # nested list), so this never fires for uppercase letters.
            if(exp[2][0] == '&' and exp[2][1] in letters or exp[2][0] == '|' and exp[2][1] in letters or exp[2][0] == '~' and exp[2][1] in letters):
                c = exp[2][0]
                exp[2] = exp[2][1:]
                exp[3] = exp[3] + c
        except:
            # Short expressions have no exp[2]/exp[3]; ignore.
            pass
        for i in exp:
            if '&' not in i and '|' not in i and '~' not in i:
                # Operand: push the posting list for the stemmed term.
                try:
                    self.push(self.vocab[stemmer.stem(i)])
                except KeyError:
                    return ["Does not exist in the database"]
            else:
                # Operator run: apply each operator character in turn.
                # NOTE(review): any character other than '&' or '~' falls
                # through to the union branch.
                for char in i:
                    if(char == '&'):
                        val1 = self.pop()
                        val2 = self.pop()
                        res = op.intersection(val1, val2)
                    elif(char == '~'):
                        val1 = self.pop()
                        res = op.diff(val1)
                    else:
                        val1 = self.pop()
                        val2 = self.pop()
                        res = op.union(val1, val2)
                    self.push(res)
        temp = self.pop()
        self.array = []   # reset the stack so the instance can be reused
        return temp
class Operators:
    """Set-style combinators over posting lists (lists of movie titles)."""
    def __init__(self, global_vocab):
        self.vocab = global_vocab   # reference collection used by diff()
    def intersection(self, operand1, operand2):
        """Titles present in both posting lists (order not preserved)."""
        return list(set(operand1) & set(operand2))
    def union(self, operand1, operand2):
        """Titles present in either posting list (order not preserved)."""
        return list(set(operand1) | set(operand2))
    def diff(self, operand1):
        """Symmetric difference between the vocabulary keys and *operand1*.

        NOTE(review): iterating self.vocab yields its KEYS (terms), while
        operand lists hold titles — confirm this is the intended NOT.
        """
        left = set(self.vocab) - set(operand1)
        right = set(operand1) - set(self.vocab)
        return list(left | right)
class BuildIndex:
    """Rebuilds a term -> posting-list mapping with duplicate postings removed."""
    def __init__(self, index):
        self.index = index   # raw inverted index: term -> list of movie titles
    def check_key(self, key, global_vocab):
        """Return True when *key* already exists in *global_vocab*."""
        return key in global_vocab
    def build(self):
        """Return a copy of the index whose posting lists are de-duplicated.

        First-occurrence order of each posting is preserved.
        """
        vocab = {}
        for term, movies in self.index.items():
            bucket = vocab.setdefault(term, [])
            for movie in movies:
                if movie not in bucket:
                    bucket.append(movie)
        return vocab
if __name__ == "__main__":
    # Interactive boolean-search REPL over the prebuilt inverted index.
    with open('index.json', 'r') as fp:
        index = json.load(fp)
    vocab = BuildIndex(index).build()
    query = input("QUERY: ")
    while query != "exit" and query != "quit":
        # Parse to postfix, evaluate against the index, print matching titles.
        parsed_query = Parse(query).parser()
        results = Execute(len(parsed_query), vocab).exec(parsed_query)
        print("RESULT:")
        if(len(results) == 0):
            print("No matching results found")
        else:
            for result in results:
                print(result)
        print("\n")
        query = input("QUERY: ")
        if(query == "exit" or query == "quit"):
            sys.exit(0)
    sys.exit(0)
| UTF-8 | Python | false | false | 6,259 | py | 8 | search.py | 4 | 0.473878 | 0.464611 | 0 | 236 | 25.5 | 148 |
Brady31027/leetcode | 11,879,879,555,400 | 33660d6f986e9e0f341994ad9deb6dd96ef6a3e3 | 46c8c0e435877f4564970198e8a177e9883103e9 | /260_Single_Number_III/single_number_iii.py | df10cab1445001c5f2f38837772255833a614b56 | []
| no_license | https://github.com/Brady31027/leetcode | 26f7c1f4e8bfad0dee4d819f91aa93a241223330 | d66be3a8f002875097754df6138c704e28b79810 | refs/heads/master | 2018-02-16T14:52:31.976844 | 2017-08-10T19:18:59 | 2017-08-10T19:18:59 | 63,519,661 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
missingOne, missingTwo = None, None
nums.sort()
for num in nums:
if num == missingOne:
missingOne = None
continue
elif num == missingTwo:
missingTwo = None
continue
else:
if missingOne == None:
missingOne = num
elif missingTwo == None:
missingTwo = num
else:
return [missingOne, missingTwo]
return [missingOne, missingTwo]
| UTF-8 | Python | false | false | 701 | py | 267 | single_number_iii.py | 225 | 0.446505 | 0.446505 | 0 | 23 | 29.478261 | 51 |
gmrzone/simpleDjangoBlog | 412,316,867,500 | 43892597b9fc4742defcbcd6650b49a63ab13218 | 998ad79189683aca493d3dbf8898657573794c0d | /myblog/feed.py | 470370f496945fab9aa965d82d032ddcac5fdd40 | []
| no_license | https://github.com/gmrzone/simpleDjangoBlog | 2585cf08ef029cf4b177133978c391da73f6235c | 6b4d0b72e6a85b5e649f1af31ab84c7523db82e8 | refs/heads/master | 2023-05-01T15:07:59.304048 | 2021-05-20T14:22:51 | 2021-05-20T14:22:51 | 327,601,362 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .models import Post
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatechars
from django.urls import reverse_lazy
class PostFeed(Feed):
    """RSS feed exposing the ten most recently published blog posts."""
    # Channel-level metadata rendered in the feed header.
    title = "MyBlog Latest Feed"
    link = ""
    description = 'Latest Post From Our Blog'
    def items(self):
        """Return the ten newest published posts."""
        queryset = Post.published.all()
        return queryset.order_by('-publish')[:10]
    def item_link(self, item):
        """Build the canonical detail URL from the publish date and slug."""
        published = item.publish
        return reverse_lazy(
            'post_details',
            args=[published.year, published.month, published.day, item.slug],
        )
    def item_title(self, item):
        """Use the post title as the feed entry title."""
        return item.title
    def item_description(self, item):
        """Show a 25-character preview of the post body."""
        return truncatechars(item.body, 25)
Rajeev2k11/blockchain | 11,338,713,690,223 | f75355be8bf340b48953e720a547b4ecba8bc3d1 | 17396c62b7789620f24385664cfb91e934df5f40 | /blockchain.py | 9e0acce65437096682fae143ec4ba39481048a91 | []
| no_license | https://github.com/Rajeev2k11/blockchain | 37106d8677e60f9eaacd2737ef576e04fa49d397 | 88222044ac21f066da76a34e29571ffaf81d14c7 | refs/heads/master | 2020-05-09T13:13:36.409632 | 2019-04-13T08:37:44 | 2019-04-13T08:37:44 | 181,143,240 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 12 20:45:16 2019
@author: rajeevranjan
"""
| UTF-8 | Python | false | false | 115 | py | 1 | blockchain.py | 1 | 0.617391 | 0.495652 | 0 | 7 | 15.285714 | 35 |
beatonma/snommoc | 9,792,525,449,343 | 900f02b1378c94e6442fae64c62586c93d3f4827 | 55647258df0565f19179ffb97ac217708d84ba4a | /repository/resolution/constituency.py | e6736f0d4ef2ce1251b5ce6944c3e7e401536609 | []
| no_license | https://github.com/beatonma/snommoc | 25de0e81af0d9940bdc3aa6420cb5764d50c6d11 | 0a9d37dcad112c5dd98609c1566e74176ae3d89d | refs/heads/main | 2022-03-11T07:53:33.038649 | 2022-03-05T17:03:56 | 2022-03-05T17:03:56 | 188,595,195 | 0 | 0 | null | false | 2022-02-18T17:54:30 | 2019-05-25T17:35:58 | 2021-11-30T14:35:17 | 2022-02-18T17:54:30 | 3,397 | 0 | 0 | 1 | Python | false | false | import logging
import re
from datetime import date as _date
from functools import reduce
from operator import __or__
from typing import List, Optional
from django.db.models import Q
from repository.models import (
Constituency,
ConstituencyAlsoKnownAs,
ConstituencyResult,
ContestedElection,
UnlinkedConstituency,
)
from repository.models.mixins import PeriodMixin
from util.time import get_today
log = logging.getLogger(__name__)
def get_active_constituencies(**kwargs):
    """Return constituencies still in effect (no end date), further filtered by **kwargs."""
    return Constituency.objects.filter(end__isnull=True, **kwargs)
def get_constituency_for_date(
    name: str,
    date: Optional[_date],
) -> Optional[Constituency]:
    """Resolve *name* to the Constituency in effect on *date* (today if None).

    Lookup order: fuzzy name match; when that finds nothing, the
    ConstituencyAlsoKnownAs table; when several constituencies share the
    name, prefer the one whose period contains *date*, else the closest
    by start date.
    """
    def _generalised_filter(n: str):
        """Remove punctuation, conjunctions, etc which may not be formatted the
        same way from different sources e.g. 'and' vs '&'."""
        # NOTE(review): the "\&" replacements target re.escape() output, but
        # re.escape stopped escaping '&' in Python 3.7 — confirm these
        # substitutions still fire on the interpreter in use.
        name_regex = (
            re.escape(n)
            .replace(",", ",?")
            .replace("\&", "(&|and)")
            .replace(" and\ ", " (&|and)\ ")
        )
        return {"name__iregex": name_regex}
    if name is None:
        return None
    if date is None:
        date = get_today()
    c = Constituency.objects.filter(**_generalised_filter(name))
    count = c.count()
    # Simple cases
    if count == 0:
        # Name not found - try and resolve the constituency using ConstituencyAlsoKnownAs.
        try:
            return (
                ConstituencyAlsoKnownAs.objects.filter(name=name)
                .filter(PeriodMixin.get_date_in_period_filter(date))
                .first()
                .canonical
            )
        except Exception as e:
            # .first() returned None (no alias in period) -> AttributeError.
            log.info(f"No ConstituencyAKA found for name={name}, date={date}: {e}")
            return None
    elif count == 1:
        # Name found and only one result so no further filtering required
        return c.first()
    # There are multiple results so we have to try filtering by date
    with_valid_date = c.exclude(start=None).order_by("start")
    filtered_by_date = with_valid_date.filter(
        PeriodMixin.get_date_in_period_filter(date)
    )
    if filtered_by_date:
        # Result was found that matches the date requirement
        return filtered_by_date.first()
    earliest = with_valid_date.first()
    if earliest.start > date:
        # Date is before earliest result -> return earliest available result
        return earliest
    # All else fails, return the most recent available result.
    return with_valid_date.last()
def get_current_constituency(name: str) -> Optional[Constituency]:
    """Convenience wrapper: resolve *name* against today's date."""
    today = get_today()
    return get_constituency_for_date(name, today)
def get_suggested_constituencies(name: str, date: _date) -> List[Constituency]:
    """
    Return a list of constituencies that existed at the time of the election
    and have a similar [i.e. matching word(s)] name.

    Falls back to a name-only match when the name+date query finds nothing.
    """
    def _remove_substrings(string, chars: list) -> str:
        # Strip each character in *chars* outright (e.g. '&', ',').
        for c in chars:
            string = string.replace(c, "")
        return string
    def _remove_words(string, words: list) -> str:
        # Strip whole words only (word-boundary match), collapsing the
        # doubled spaces this leaves behind.
        for word in words:
            string = re.sub(rf"\b{word}\b", "", string)
            string = string.replace("  ", " ")
        return string
    stripped_name = _remove_substrings(name, ["&", ","])
    stripped_name = _remove_words(
        stripped_name, ["and", "East", "West", "North", "South"]
    )
    stripped_name = re.sub(
        r"\s+", " ", stripped_name
    )  # Ensure remaining words are separated by only one space
    # Drop empty chunks: split(" ") yields "" for leading/trailing spaces and
    # for an all-filler name, and Q(name__icontains="") would match EVERY
    # constituency. This also makes the empty-chunk guard below reachable.
    name_chunks = [chunk for chunk in stripped_name.split(" ") if chunk][:5]
    if not name_chunks:
        return []
    name_filters = reduce(__or__, [Q(name__icontains=x) for x in name_chunks])
    date_filter = PeriodMixin.get_date_in_period_filter(date)
    suggestions = Constituency.objects.filter(date_filter).filter(name_filters)
    if not suggestions:
        # If no result using date and name, try again just using name
        suggestions = Constituency.objects.filter(name_filters)
    return suggestions
def resolve_unlinked_constituency(
    unlinked: UnlinkedConstituency, canonical: Constituency
):
    """
    Consume an UnlinkedConstituency instance by linking it with a canonical Constituency.
    Creates an instance of ConstituencyAlsoKnownAs so this can be resolved automatically in the future.
    If person_won, this should be used to create a ConstituencyResult object.
    Otherwise, this should be used to create a ContestedElection object.
    """
    # Record the alias so future occurrences of this name resolve
    # automatically to the canonical constituency.
    ConstituencyAlsoKnownAs.objects.update_or_create(
        name=unlinked.name,
        canonical=canonical,
        defaults={"start": canonical.start, "end": canonical.end},
    )

    if unlinked.person_won:
        # Winner: record the term served in this constituency.
        result_defaults = {
            "mp": unlinked.person,
            "start": unlinked.start,
            "end": unlinked.end,
        }
        ConstituencyResult.objects.update_or_create(
            constituency=canonical,
            election=unlinked.election,
            defaults=result_defaults,
        )
    else:
        # Loser: record only that the seat was contested.
        ContestedElection.objects.update_or_create(
            person=unlinked.person,
            election=unlinked.election,
            defaults={"constituency": canonical},
        )

    unlinked.delete()
| UTF-8 | Python | false | false | 5,249 | py | 354 | constituency.py | 340 | 0.622214 | 0.621642 | 0 | 178 | 28.488764 | 103 |
poldy/16k_muds | 15,255,723,841,983 | a2b38800180a67ec5761f60ef6d756b81f6bbdf0 | aaec250953a41feb95154f8c2bc9a5896c7a6e7e | /icecube@ihug.co.nz/sys/static.py | 3e002619ae5db5e787731ccd0afaf5a64797dece | []
| no_license | https://github.com/poldy/16k_muds | 2eee2053c23d2267cce961d6b94c9cd74f9ad05c | ecb1d4cf1d1b20fa4b1721c978681d3b1a1d5ff5 | refs/heads/master | 2021-05-28T18:36:45.824871 | 2013-05-26T15:19:25 | 2013-05-26T15:19:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Static data object
class static(Object):
    """Singleton fallback store for static game data.

    Attribute lookups that have no loaded value resolve to a readable
    placeholder string instead of raising, so missing data never
    crashes callers.
    """
    singleton = 1
    def __init__(S):
        S.loadhook()
    def loadhook(S):
        # Heh.
        pass
    # Retired inline data, kept only as historical reference:
    #S.badcmd = 'Unknown command.\n'
    #S.whohdr = 'Users currently connected:\n'
    #S.bye = 'Quitting. Goodbye!\n'
    #S.user_fsm = {
    #    'start': ('pyGmy incarnation #1.\nTo create a new character, log in as the name you want.\n\n', 'login'),
    #    'login': ('login: ', None),
    #    'badname': ('Names may only be alphabetic.\n', 'login'),
    #    'oldpwd': ('Existing player.\nPassword: ', None),
    #    'wrongpwd': ('Wrong password.\n', 'login'),
    #    'newplr': ('New player.\n', 'newpwd1'),
    #    'newpwd1': ('Choose a password (blank to abort): ', None),
    #    'newpwd2': ('Reenter your password: ', None),
    #    'nomatch': ("Passwords don't match.\n", 'newpwd1'),
    #    'cantcreate': ('Sorry, that name is now taken.\n', None),
    #    'welcome': ('Successfully logged in.\n', 'cmd'),
    #    'cmd': ('', None)
    #    }
    #S.ftp_fsm = {
    #    'start': ('', 'auth'),
    #    'auth': ('', None),
    #    'welcome': ('OK pyGmyMudFtp 2.0 ready.\n', 'waitpush'),
    #    'waitpush': ('', None),
    #    'push': ('OK pushing you data\n', 'idle'),
    #    'idle': ('', None),
    #    'error': ('FAILED some random error\n', 'idle'),
    #    'perror': ('FAILED only push mode supported\n', 'waitpush'),
    #    'noop': ('OK idling.\n', 'cmd'),
    #    'send': ('', None), tostate() never called
    #    'recv': ('', None),
    #    'close': ('FAILED\n', None),
    #    }
    def __getattr__(S, attr):
        # Private/dunder lookups must fail normally (pickling, copy, etc.).
        if attr[0] == '_':
            # Fixed: the old Py2-only "raise AttributeError, attr" is a
            # syntax error on Python 3; the call form works on both.
            raise AttributeError(attr)
        else:
            return '<missing static data ' + attr + '>'
| UTF-8 | Python | false | false | 1,905 | py | 94 | static.py | 37 | 0.453018 | 0.448294 | 0 | 50 | 36.98 | 118 |
fabiano-araujo/game_news | 8,787,503,097,795 | 2f19b99234d9ced79570a892f555e1cbe2d8d165 | e02da9082018d65e02077decb1ec05b0a011f071 | /server/admin.py | f6eccb57f5666ad6ce8bd996c080764a29639909 | []
| no_license | https://github.com/fabiano-araujo/game_news | 0f85af1d9325f7fac46179059e21e4fa9965e9fa | 3b86341eb92e259ffefe4fc2921a7de9b401728f | refs/heads/master | 2021-01-09T20:42:30.871093 | 2016-06-30T03:13:00 | 2016-06-30T03:13:00 | 62,275,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from server.models import *
# Register your models here.
class ArtigoAdmin(admin.ModelAdmin):
    """Admin options for Artigo entries."""
    # searchable by title; change list shows photo + title, filterable by title
    search_fields = ['titulo']
    list_display = ['foto','titulo']
    list_filter = ['titulo']
    save_on_top = True
class PAdmin(admin.ModelAdmin):
    """Admin options for P entries (same layout as ArtigoAdmin)."""
    # searchable by title; change list shows photo + title, filterable by title
    search_fields = ['titulo']
    list_display = ['foto','titulo']
    list_filter = ['titulo']
    save_on_top = True
admin.site.register(Artigo,ArtigoAdmin)
admin.site.register(P,PAdmin) | UTF-8 | Python | false | false | 453 | py | 7 | admin.py | 4 | 0.713024 | 0.713024 | 0 | 17 | 25.705882 | 40 |
albertskog/Wakeup-Light | 14,860,586,876,825 | 0162e9b07cc890725c964991f83acf78a8ff1dbc | 40a486c3c9dc52b3fda522009b0b32bf45f1e68c | /pi/set-rgb.py | 1312168e65988163b41d1b37cf3a37ccfa366bc3 | []
| no_license | https://github.com/albertskog/Wakeup-Light | 9ccbbd29347d4dbf75999391fb94719edf33a2d2 | 15c5dab1d8bcf047a12a74e33d267ccd536aedd0 | refs/heads/master | 2016-09-06T01:52:16.111184 | 2015-09-20T20:59:41 | 2015-09-20T20:59:41 | 26,997,222 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
import paho.mqtt.publish as mqtt

# Usage:
#   set-rgb.py VALUE            -> red = green = blue = VALUE
#   set-rgb.py RED GREEN BLUE   -> individual channel values
if len(sys.argv) == 1:
    print("No argument given")
    exit()
if len(sys.argv) == 2:
    red = sys.argv[1]
    green = sys.argv[1]
    blue = sys.argv[1]
elif len(sys.argv) >= 4:
    red = sys.argv[1]
    green = sys.argv[2]
    blue = sys.argv[3]
else:
    # Fixed: exactly two values used to fall through with red/green/blue
    # unbound and crash below with a NameError.
    print("Expected one or three colour values")
    exit()
# JSON payload for the light controller ({{ escapes a literal brace).
command = '{{"red":{0}, "green":{1}, "blue":{2}}}'.format(red, green, blue)
print(command)
mqtt.single("lights/rgb", command, hostname="192.168.1.109")
| UTF-8 | Python | false | false | 445 | py | 11 | set-rgb.py | 7 | 0.626966 | 0.577528 | 0 | 25 | 16.8 | 75 |
easy-operation/BackstageSystem | 12,799,002,572,166 | 190dfc259c98353911d53d9e65af3b90cdc1eacd | d828ab8487d9f1552364f7fa6f04adfbfb1046f1 | /VMManagement/migrations/0005_auto_20180305_1700.py | 47b7263a105508d03a7ae6f9bb7d669024f7e9af | []
| no_license | https://github.com/easy-operation/BackstageSystem | ac9ec44570bfe964b609548e14eed31d113f5536 | 5762724224022a03410b22244142b9e0200f5d1c | refs/heads/master | 2021-01-25T13:05:35.181865 | 2018-03-09T09:29:05 | 2018-03-09T09:29:05 | 123,526,216 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.2 on 2018-03-05 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename serverinfo.memory to total_memory and add used_memory."""
    dependencies = [
        ('VMManagement', '0004_auto_20180305_1519'),
    ]
    operations = [
        # 'memory' now explicitly means the machine's total memory
        migrations.RenameField(
            model_name='serverinfo',
            old_name='memory',
            new_name='total_memory',
        ),
        # new optional free-text column for currently used memory
        migrations.AddField(
            model_name='serverinfo',
            name='used_memory',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| UTF-8 | Python | false | false | 569 | py | 17 | 0005_auto_20180305_1700.py | 16 | 0.571178 | 0.513181 | 0 | 23 | 23.73913 | 73 |
gdcc/easyDataverse | 15,281,493,684,773 | 1e1c894103901ede1f75e275bdbba19d0ec07055 | 591ac2f03bb6a819f1086df00f925899a130f501 | /tests/conftest.py | 8bee1dc070c19aa576f993e723a5267b7b142166 | [
"MIT"
]
| permissive | https://github.com/gdcc/easyDataverse | 8b35a4a854a4a8fa39e067cbf690a2aa4df5bd23 | efb15359e7f2a962d7fecd07d90e0ba62029c526 | refs/heads/main | 2023-09-01T05:09:05.121070 | 2023-04-20T17:01:55 | 2023-04-20T17:01:55 | 504,152,566 | 7 | 2 | MIT | false | 2023-04-27T20:50:11 | 2022-06-16T12:46:34 | 2023-04-20T17:42:23 | 2023-04-27T20:43:41 | 249 | 6 | 2 | 4 | Python | false | false | import pytest
from easyDataverse import Dataset
from easyDataverse.core.base import DataverseBase
from tests.fixtures.dataset.toydataset import ToyDataset, SomeEnum
from tests.fixtures.dataset.invalidclass import InvalidBlock, AnotherEnum
@pytest.fixture
def metadatablock():
    """A ToyDataset metadatablock with one compound ("bar") added."""
    block = ToyDataset(foo="foo", some_enum=SomeEnum.enum)
    block.add_compound("bar")
    return block
@pytest.fixture
def toy_dataset():
    """A Dataset containing a single artificial metadatablock."""
    # Set up the metadatablock
    block = ToyDataset(foo="foo", some_enum=SomeEnum.enum)
    block.add_compound("bar")
    # Add to dataset
    dataset = Dataset()
    dataset.add_metadatablock(block)
    return dataset
@pytest.fixture
def invalid_block():
    """
    Return a class instance that looks similar to a valid block but has an
    invalid parent, for negative-path tests.
    """
    block = InvalidBlock(foo="foo", some_enum=AnotherEnum.enum_field)
    block.add_compound("bar")
    return block
@pytest.fixture
def dataverse_json():
    """Expected JSON output when passed to pyDataverse."""
    # Context manager closes the handle promptly; the previous bare
    # open().read() leaked it and relied on the locale's default encoding.
    with open(
        "./tests/fixtures/dataset/dataverse_json_output.json", encoding="utf-8"
    ) as handle:
        return handle.read()
@pytest.fixture
def yaml_input():
    """YAML file used to initialize a dataset."""
    # Context manager closes the handle promptly; the previous bare
    # open().read() leaked it and relied on the locale's default encoding.
    with open("./tests/fixtures/yaml_output.yaml", encoding="utf-8") as handle:
        return handle.read()
@pytest.fixture
def dataverse_base_class():
    """Sets up a dummy class to test the base class"""
    # Minimal DataverseBase subclass with two plain string fields.
    class Test(DataverseBase):
        foo: str
        bar: str
    return Test
@pytest.fixture
def metadatablock_json_schema():
    """Sets up a metadatablock json schema."""
    # Context manager closes the handle promptly; the previous bare
    # open().read() leaked it and relied on the locale's default encoding.
    with open(
        "./tests/fixtures/dataversebase/toydataset.schema.json", encoding="utf-8"
    ) as handle:
        return handle.read()
| UTF-8 | Python | false | false | 1,672 | py | 74 | conftest.py | 42 | 0.70933 | 0.70933 | 0 | 74 | 21.594595 | 93 |
SebTee/foobarGoogle | 4,286,377,365,971 | df2f017a132d520e889f1c2790d6c136045b1e38 | ef84aac2da1a83c55c7bcec26101c93b7f52cb3a | /escape-pods/solution.py | 196fb5f0431c8df676aa64b8860dc23107d2df2f | []
| no_license | https://github.com/SebTee/foobarGoogle | 4f63ede9d31193c5b88d7b320239f968102ac0e5 | 76ec09b7126663c2a9c522fac756700c87ae81f8 | refs/heads/main | 2023-04-03T20:43:29.334480 | 2021-04-09T09:16:37 | 2021-04-09T09:16:37 | 338,909,851 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def consolidate_sink_source(entrances, exits, path, max_corridor_size):
    # Build a single super-source row: full capacity into every entrance
    # node (all original nodes are shifted right by one to make room).
    consolidated_source, consolidated_sink = [0] * (len(path) + 2), [0] * (len(path) + 2)
    for entrance in entrances:
        consolidated_source[entrance + 1] = max_corridor_size
    flow_graph = []
    for node_id in range(len(path)):
        # Exit rooms drain into the super-sink with "unbounded" capacity.
        sink_flow = 0
        if node_id in exits:
            sink_flow = max_corridor_size
        flow_graph.append([0] + path[node_id] + [sink_flow])
    flow_graph.append(consolidated_sink)
    flow_graph = [consolidated_source] + flow_graph
    # Returns (capacity matrix, source index, sink index).
    return flow_graph, 0, len(flow_graph) - 1
def find_maximum_flow(residual_flow_graph, source, sink, max_corridor_size):
    """Ford-Fulkerson: keep augmenting until no path from source to sink remains.

    Mutates the residual graph in place via update_residual_flow_graph and
    returns the total flow pushed.
    """
    total_flow = 0
    while True:
        path, path_flow = find_augmenting_path(
            residual_flow_graph, [source], max_corridor_size, sink
        )
        if not path:
            break
        total_flow += path_flow
        residual_flow_graph = update_residual_flow_graph(
            residual_flow_graph, path, path_flow
        )
    return total_flow
def find_augmenting_path(residual_flow_graph, augmenting_path, augmenting_path_flow, sink):
    """Depth-first search for an augmenting path with positive residual capacity.

    Returns (path, bottleneck_flow) on success, ([], 0) when no path exists.
    Nodes already on the path are never revisited.
    """
    current = augmenting_path[-1]
    visited = set(augmenting_path)
    for neighbour in range(len(residual_flow_graph)):
        if neighbour in visited:
            continue
        capacity = residual_flow_graph[current][neighbour]
        if capacity <= 0:
            continue
        extended = augmenting_path + [neighbour]
        bottleneck = min(augmenting_path_flow, capacity)
        if neighbour == sink:
            return extended, bottleneck
        found_path, found_flow = find_augmenting_path(
            residual_flow_graph, extended, bottleneck, sink
        )
        if found_flow > 0:
            return found_path, found_flow
    return [], 0
def update_residual_flow_graph(residual_flow_graph, augmenting_path, augmenting_path_flow):
    """Push *augmenting_path_flow* along the path, updating residual capacities.

    Forward edges lose the pushed flow; reverse edges gain it (so it can be
    cancelled later). The graph is mutated in place and also returned.
    """
    for idx in range(len(augmenting_path) - 1):
        u = augmenting_path[idx]
        v = augmenting_path[idx + 1]
        residual_flow_graph[u][v] -= augmenting_path_flow
        residual_flow_graph[v][u] += augmenting_path_flow
    return residual_flow_graph
def solution(entrances, exits, path):
    """Maximum number of bunnies per step that can move from entrances to exits.

    Adds a consolidated super-source/super-sink, then runs Ford-Fulkerson.
    """
    max_corridor_size = 2000000
    graph, source, sink = consolidate_sink_source(
        entrances, exits, path, max_corridor_size
    )
    return find_maximum_flow(graph, source, sink, max_corridor_size)
# Ad-hoc sanity checks; expected results are in the trailing comments.
print(solution([0], [3],
               [[0, 7, 0, 0],
                [0, 0, 6, 0],
                [0, 0, 0, 8],
                [9, 0, 0, 0]]))
# 6
print(solution([0, 1], [4, 5],
               [[0, 0, 4, 6, 0, 0],
                [0, 0, 5, 2, 0, 0],
                [0, 0, 0, 0, 4, 4],
                [0, 0, 0, 0, 6, 6],
                [0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0]]))
| UTF-8 | Python | false | false | 3,498 | py | 10 | solution.py | 9 | 0.542882 | 0.518868 | 0 | 74 | 46.27027 | 120 |
azra-mind/be-azramind | 4,320,737,141,495 | d1d0119e7d5756f4cc5b267ce2016bada078543e | eb3e24297e01c7b4fb32a4213d493a45ba24ccf3 | /models/user.py | 554992732a090ca202853d1452db26ba034c65dd | []
| no_license | https://github.com/azra-mind/be-azramind | c586a59d20ee2ce0d3acd991d1ef734c0b81636a | 46986c24d77335764f0703a878f42db9edd14373 | refs/heads/master | 2021-07-11T10:25:30.237368 | 2020-02-20T06:15:37 | 2020-02-20T06:15:37 | 239,374,245 | 0 | 0 | null | false | 2021-03-20T02:58:15 | 2020-02-09T21:06:01 | 2020-05-07T20:07:17 | 2021-03-20T02:58:14 | 73 | 0 | 0 | 2 | Python | false | false | from db import db
import random
# each model will be an extension of db.Model class
class UserModel(db.Model):
    """A registered user; parent of ScoreModel rows."""
    # tell SQLAlchemy the tablename
    __tablename__ = 'users'
    # define the schema for the users table
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80))
    # NOTE(review): stored in plain text here -- should be hashed before
    # this ships anywhere real.
    password = db.Column(db.String(80))
    # establish relationship with the child model i.e. ScoreModel
    # lazy = dynamic returns the query object so it can be further sliced (.eg .all(), .first())
    scores = db.relationship('ScoreModel', lazy='dynamic')

    def __init__(self, username, password=None):
        """Create a user; a random password is generated when none is given.

        Fixed: the old default ``f"Super{random.randrange(511, 11571)}"`` was
        evaluated once at class-definition time, so every user created
        without an explicit password shared the same "random" password.
        """
        self.username = username
        if password is None:
            password = f"Super{random.randrange(511, 11571)}"
        self.password = password

    # find user by username
    @classmethod
    def find_by_username(cls, username):
        return cls.query.filter_by(username=username).first()

    # find user by id
    @classmethod
    def find_by_id(cls, _id):
        return cls.query.filter_by(id=_id).first()

    # adding a user to the db
    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    # json for a username
    def json_username(self):
        return {'id': self.id, 'username': self.username}

    # json for getting scores by user
    def json_scores(self):
        return {'username': self.username, 'scores': [score.json() for score in self.scores.all()]}
| UTF-8 | Python | false | false | 1,414 | py | 6 | user.py | 5 | 0.649929 | 0.641443 | 0 | 47 | 29.085106 | 99 |
uroborus/synapse | 5,866,925,334,493 | 0c70b1987b98f7a3e1d7f5a7a6a4747a2455198f | c4cfe7b67a2980cd99b5a1315fc73f77f1f76f2f | /synapse/config/ratelimiting.py | f126782b8db13b772335f72d15e7ddd5f18115d9 | [
"Apache-2.0"
]
| permissive | https://github.com/uroborus/synapse | a271a4cc9d69e34e8d7bfb985f5b171699e367d5 | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | refs/heads/master | 2021-01-17T17:12:55.371236 | 2014-09-19T10:41:49 | 2014-09-19T10:41:49 | 24,245,059 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class RatelimitConfig(Config):
    """Config section controlling per-client message rate limiting."""

    def __init__(self, args):
        super(RatelimitConfig, self).__init__(args)
        # Sustained rate plus burst allowance, parsed from the command line.
        self.rc_messages_per_second = args.rc_messages_per_second
        self.rc_message_burst_count = args.rc_message_burst_count

    @classmethod
    def add_arguments(cls, parser):
        super(RatelimitConfig, cls).add_arguments(parser)
        group = parser.add_argument_group("ratelimiting")
        for flag, default, help_text in (
            ("--rc-messages-per-second", 0.2,
             "number of messages a client can send per second"),
            ("--rc-message-burst-count", 10,
             "number of message a client can send before being throttled"),
        ):
            group.add_argument(flag, type=float, default=default, help=help_text)
| UTF-8 | Python | false | false | 1,381 | py | 131 | ratelimiting.py | 103 | 0.687907 | 0.679218 | 0 | 35 | 38.457143 | 77 |
SS4G/ydy | 12,326,556,168,248 | 11c0163b5dcd7231f57bd537346204f6be49f274 | b232b26af08e8b9c84139a35e7ab42188a14309f | /ydy_web/apps.py | f353a6cb6036280beccf7510446db29e08d07191 | []
| no_license | https://github.com/SS4G/ydy | c6116d216b5eeafc450dc16625bad0a0bacc4f46 | 123a0a04fe8f43010aa4de8932915052cfb0132f | refs/heads/master | 2021-01-19T01:31:11.682393 | 2016-11-10T03:28:21 | 2016-11-10T03:28:21 | 72,983,878 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class YdyWebConfig(AppConfig):
    """Django application configuration for the ``ydy_web`` app."""
    name = 'ydy_web'
| UTF-8 | Python | false | false | 88 | py | 16 | apps.py | 5 | 0.738636 | 0.738636 | 0 | 5 | 16.6 | 33 |
immersinn/cia_library | 9,835,475,125,304 | d1c98a206a8d39c27dfc9409c598702566e8d70c | e6d72fb7c9bd1a15b325b018eac4e7d33ca12635 | /src/text_ocr.py | 5956389a0248cb11472c1f36feaab563f2517a24 | [
"MIT"
]
| permissive | https://github.com/immersinn/cia_library | 59500de77c09d4b25005567d8b35c2c356bfe1b1 | 837e980df1bb0a8166b1616169c1751610e5e0bc | refs/heads/master | 2021-01-11T17:11:20.508019 | 2017-02-01T21:10:21 | 2017-02-01T21:10:21 | 79,736,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 22 17:02:14 2017
@author: immersinn
"""
import textract
def textFromPDF(pdf):
    """Run OCR over *pdf* with textract's tesseract backend (English).

    :param pdf: path to the PDF file to process
    :return: the extracted text as produced by textract.process
    """
    text = textract.process(pdf, method="tesseract", language="eng")
return(text) | UTF-8 | Python | false | false | 258 | py | 17 | text_ocr.py | 11 | 0.608527 | 0.554264 | 0 | 17 | 14.235294 | 68 |
alantop0720/PythonCommon | 2,439,541,441,762 | 635dd7b9e780a4718fc24cfc2b9522510faec843 | 04e94400e463e04bf23a671f451843ed800a60b5 | /pyside6-source/concurrent/bad_example_2.py | eff63fead8ac7e823ecd001bbd70946830c54756 | []
| no_license | https://github.com/alantop0720/PythonCommon | b72e7e4bd4986276611499cc5770e2b89dc6ac2e | 0e5da74f1d3af56d9ba7fb951d77ce60380b8c9a | refs/heads/master | 2023-04-15T12:57:40.725169 | 2023-04-09T13:31:30 | 2023-04-09T13:31:30 | 162,530,480 | 1 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import time
from PySide6.QtCore import QTimer
from PySide6.QtWidgets import (
QApplication,
QLabel,
QMainWindow,
QPushButton,
QVBoxLayout,
QWidget,
)
class MainWindow(QMainWindow):
    """Demo window showing why long work must not run on the GUI thread.

    "DANGER!" starts a ~10 s blocking loop in ``oh_no`` that keeps the UI
    alive only via repeated ``QApplication.processEvents()`` calls -- the
    anti-pattern this "bad example" exists to demonstrate.
    """
    def __init__(self):
        super().__init__()
        self.counter = 0
        layout = QVBoxLayout()
        self.l = QLabel("Start")
        b = QPushButton("DANGER!")
        b.pressed.connect(self.oh_no)
        c = QPushButton("?")
        c.pressed.connect(self.change_message)
        layout.addWidget(self.l)
        layout.addWidget(b)
        layout.addWidget(c)
        w = QWidget()
        w.setLayout(layout)
        self.setCentralWidget(w)
        self.show()
    def change_message(self):
        # Fired by "?": pressing it while oh_no() is looping changes the
        # label text mid-loop, demonstrating shared mutable state.
        self.message = "OH NO"
    def oh_no(self):
        # Blocks the GUI thread for ~10 s; processEvents() keeps the UI
        # responsive but re-enters the event loop (the anti-pattern).
        self.message = "Pressed"
        for _ in range(100):
            time.sleep(0.1)
            self.l.setText(self.message)
            QApplication.processEvents()
import os
# NOTE(review): machine-specific absolute plugin path -- breaks on any
# other machine; prefer leaving Qt's default plugin lookup in place.
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = "D:/alantop_dir/alantop_sde/anaconda3/Lib/site-packages/PySide6/plugins/platforms"
app = QApplication(sys.argv)
window = MainWindow()
app.exec()
| UTF-8 | Python | false | false | 1,166 | py | 145 | bad_example_2.py | 132 | 0.602058 | 0.593482 | 0 | 57 | 19.45614 | 126 |
hajinhoe-backup/Tetris | 1,520,418,461,776 | a34a957d31a562bbd064a595df3cd0ac47c32f83 | 69099a7ed2f6ecb68dba54c305bfba4c2684b0ff | /play.py | 7882ccbc54d34b5f8eb36ee159a22055bf998f9b | []
| no_license | https://github.com/hajinhoe-backup/Tetris | a9f2328a00d1d9c34de0b75b8d20926101ab9188 | 66a1e183694ebe33a77cdec07227abd3075d1839 | refs/heads/master | 2022-02-26T01:50:10.853514 | 2017-06-14T08:12:57 | 2017-06-14T08:12:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
import random
import copy
import time
# Define some colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLOCK_SIZE = 32
# Call this function so the Pygame library can initialize itself
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.mixer.init()
pygame.init()
# Create a 520x640 sized screen
screen = pygame.display.set_mode([520, 640])
# This sets the name of the window
pygame.display.set_caption('TETRIS')
clock = pygame.time.Clock()
# Cell values: 0-6 = settled block of that colour, 7 = the falling piece
# (not yet stored on the board), 8 = empty cell.
# Make a board (it originally 40x10, but I use 24x10 board, 4 is buffer)
board = [[8 for x in range(10)] for y in range(24)]
# Piece data: for each tetromino, 4 rotation states x 4 [x, y] cell offsets.
pc_i = [[[0,1],[1,1],[2,1],[3,1]],[[2,0],[2,1],[2,2],[2,3]],[[0,2],[1,2],[2,2],[3,2]],[[1,0],[1,1],[1,2],[1,3]]]
pc_j = [[[0,1],[1,1],[2,1],[2,2]],[[2,0],[2,1],[2,2],[1,2]],[[0,1],[0,2],[1,2],[2,2]],[[2,0],[1,0],[1,1],[1,2]]]
pc_l = [[[0,1],[0,2],[1,1],[2,1]],[[0,0],[1,0],[1,1],[1,2]],[[2,0],[2,1],[1,1],[0,1]],[[0,0],[0,1],[0,2],[1,2]]]
pc_o = [[[0,1],[1,1],[0,2],[1,2]],[[0,1],[1,1],[0,2],[1,2]],[[0,1],[1,1],[0,2],[1,2]],[[0,1],[1,1],[0,2],[1,2]]]
pc_s = [[[1,1],[2,1],[0,2],[1,2]],[[1,0],[1,1],[2,1],[2,2]],[[1,1],[2,1],[0,2],[1,2]],[[0,0],[0,1],[1,1],[1,2]]]
pc_t = [[[0,1],[1,1],[2,1],[1,2]],[[1,0],[0,1],[1,1],[1,2]],[[1,0],[0,1],[1,1],[2,1]],[[1,0],[1,1],[2,1],[1,2]]]
pc_z = [[[0,1],[1,1],[1,2],[2,2]],[[2,0],[1,1],[2,1],[1,2]],[[0,0],[1,0],[1,1],[2,1]],[[2,0],[1,1],[2,1],[1,2]]]
pc_name = [pc_i, pc_j, pc_l, pc_o, pc_s, pc_t, pc_z]
# Block sprites, indexed the same way as pc_name (0-6).
block_i = pygame.image.load("img/block_i.png")
block_j = pygame.image.load("img/block_j.png")
block_l = pygame.image.load("img/block_l.png")
block_o = pygame.image.load("img/block_o.png")
block_s = pygame.image.load("img/block_s.png")
block_t = pygame.image.load("img/block_t.png")
block_z = pygame.image.load("img/block_z.png")
block_name = [block_i, block_j, block_l, block_o, block_s, block_t, block_z]
ghostblock = pygame.image.load("img/ghostblock.png")
pygame.display.set_icon(pygame.image.load("img/logo.png"))
# Game state flags and counters (frame timers, score, fall speed, hold slot).
done = False
make_piece = True
effect = False
TIME = 0
MOVE_TIME = 0
block_wait_time = 0
x_move = 0
y_move = 0
speed = 15
score = 50000
block_down = 1
hold_block = 8
block_wait = False
speed_temp = speed
gtimer = 0
conspeed = 0
pygame.mixer.Sound("sound/Korobeiniki.ogg").play(loops=-1)
click_sound = pygame.mixer.Sound("sound/click.ogg")
effect_sound = pygame.mixer.Sound("sound/jump-bump.ogg")
tak_stroke = pygame.mixer.Sound("sound/takstroke.ogg")
pre_block = random.randrange(0,7)
gameover = False
intro = True  # NOTE(review): shadowed by "def intro(lang)" below -- appears unused
lang = [["English", "img/back_eng.png"],["hangugeo", "img/back_ko.png"],["nihongo", "img/back_jp.png"]]
def intro(lang) :  # Title screen
    """Show the title screen; return the chosen language index, or 9 on quit."""
    done = False
    i = 0
    while not done :
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
                return 9
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    return i
                    break
                elif event.key == pygame.K_LEFT:
                    # Cycle language selection left (wraps around).
                    if i != 0 :
                        i -= 1
                    else :
                        i = 2
                elif event.key == pygame.K_RIGHT:
                    # Cycle language selection right (wraps around).
                    if i != 2 :
                        i += 1
                    else :
                        i = 0
        screen.blit(pygame.image.load("img/main_img.png"), [0, 0])
        font = pygame.font.SysFont('Calibri', 48, True, False)
        text = font.render(str("Press Space-bar"), False, WHITE)
        screen.blit(text, [120, 300])
        text = font.render(str("< " + lang[i][0] +" >"), False, WHITE)
        screen.blit(text, [150, 400])
        pygame.display.flip()
        clock.tick(60)
start = intro(lang)  # Show the title screen first
if start == 9 :
    done = True
else :
    back_img = pygame.image.load(lang[start][1])
# Grade badge images (A / C / F) shown next to the score.
gradea = pygame.image.load("img/gradea.png")
gradec = pygame.image.load("img/gradec.png")
gradef = pygame.image.load("img/gradef.png")
def info(): # Info screen
    """Show the info/help overlay until Space or I is pressed; True on quit."""
    while True :
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    return False
                elif event.key == pygame.K_i:
                    return False
        screen.blit(pygame.image.load("img/info.png"), [70, 100])
        pygame.display.flip()
        clock.tick(60)
def hanyangi(): # Hanyangi mascot mini-game
    """Button-mash mini-game: alternate Left/Right arrows for 120 frames.

    Returns True (success) when more than 19 alternating presses were made
    before the timer ran out; closing the window also returns True.
    """
    x = 100
    y = 80
    # isR/isL enforce strict alternation: only the expected side counts.
    isR = True
    isL = False
    count = 0
    time = 0  # NOTE(review): shadows the imported time module inside this function
    while True :
        if time == 120 :
            if count > 19 :
                return True
            else :
                return False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    if isR :
                        x += 10
                        isR = False
                        isL = True
                        count += 1
                elif event.key == pygame.K_LEFT:
                    if isL :
                        x -= 10
                        isL = False
                        isR = True
                        count +=1
        pygame.draw.rect(screen, BLACK, [95, 50, 320, 535])
        font = pygame.font.SysFont('Calibri', 48, True, False)
        # Progress bar: one '=' per two presses, "GOOOOOD" once complete.
        if count < 20 :
            text = font.render(str("[") + str("=") * int(count/2) + str("]"), False, WHITE)
        else :
            text = font.render(str("[") + str("GOOOOOD") + str("]"), False, WHITE)
        screen.blit(text, [150, 45])
        screen.blit(pygame.image.load("img/hanyangi.jpg"), [x, y])
        pygame.display.flip()
        clock.tick(60)
        time += 1
while not done: # Main game loop
    # Score decays over time and the fall speed depends on the score bracket
    # (lower speed value = faster fall).
    if score < 3000 :
        if TIME == 10 :
            score -= 3 + conspeed
        speed = 12 + conspeed
    elif score < 10000 :
        if TIME%5 == 0 :
            score -= 10 + conspeed * 3
        speed = 6 + conspeed
    else :
        if TIME%2 == 0 :
            score -= 50 + conspeed * 10
        speed = 2 + conspeed # do not lower below 2
    # Key handling. Debug keys: P adds points, U/D tweak conspeed.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Hard drop: jump to the ghost position, award 2 pts/row.
                y = gy
                block_down = 0
                score += gy * 2
            elif event.key == pygame.K_LEFT:
                x_move = -1
                MOVE_TIME += 1
            elif event.key == pygame.K_RIGHT:
                x_move = 1
                MOVE_TIME += 1
            elif event.key == pygame.K_UP:
                # Rotate: try the next rotation state, reverting on collision.
                click_sound.play()
                if change != 3:
                    pre_change = change + 1
                else:
                    pre_change = 0
                if not block_wait :
                    for i in range(4) :
                        if now_piece[pre_change][i][1] + y < 24 and -1 < now_piece[pre_change][i][0] + x < 10 :
                            if board[now_piece[pre_change][i][1] + y][now_piece[pre_change][i][0] + x] < 7:
                                pre_change = change
                    else :
                        change = pre_change
                elif score > 9 :
                    # Rotation while resting on something costs 10 points and
                    # may kick the piece up one row to make the rotation fit.
                    up_piece = False
                    for i in range(4) :
                        if now_piece[pre_change][i][1] + y > 23 :
                            up_piece = True
                        elif now_piece[pre_change][i][1] + y < 24 and -1 < now_piece[pre_change][i][0] + x < 10 :
                            if board[now_piece[pre_change][i][1] + y][
                                now_piece[pre_change][i][0] + x] < 7 :
                                up_piece = True
                    change_piece = True
                    for i in range(4):
                        if now_piece[pre_change][i][1] + y - up_piece < 24 and -1 < now_piece[pre_change][i][0] + x < 10:
                            if board[now_piece[pre_change][i][1] + y - up_piece][now_piece[pre_change][i][0] + x] < 7 :
                                change_piece = False
                        else :
                            change_piece = False
                    if change_piece:
                        if up_piece:
                            y -= 1
                        for i in range(4) :
                            if(now_piece[pre_change][i][1] + y > 23) :
                                y -= 1
                        change = pre_change
                        for i in range(4):
                            if now_piece[change][i][1] + y == 23 or board[now_piece[change][i][1] + y + 1][
                                now_piece[change][i][0] + x] < 7:
                                down = False
                                block_wait_time = 0
                                score -= 10
                        # Recompute the ghost/drop position after the kick.
                        gy = y
                        ghostloop = True
                        while ghostloop:
                            for i in range(4):
                                if now_piece[change][i][1] + gy == 23 or board[now_piece[change][i][1] + gy + 1][
                                    now_piece[change][i][0] + x] < 7:
                                    ghostloop = False
                            if ghostloop:
                                gy += 1
                        y = gy
                        for i in range(4):
                            if (now_piece[pre_change][i][1] + y > 23):
                                y -= 1
            elif event.key == pygame.K_DOWN:
                # Soft drop: temporarily speed up the fall.
                speed_temp = speed
                if speed > 5 :
                    speed = 3
                else :
                    speed = 1
            elif event.key == pygame.K_u :
                if conspeed > 2 :
                    conspeed -= 3
            elif event.key == pygame.K_d :
                if conspeed < 21 :
                    conspeed += 3
            elif event.key == pygame.K_i :
                done = info()
            elif event.key == pygame.K_p :
                score += 1000
            elif event.key == pygame.K_h :
                # Hold: stash the current piece, or swap with the held one.
                if hold_block == 8 :
                    hold_block = now_block
                    make_piece = True
                else :
                    temp = now_block
                    now_block = hold_block
                    now_piece = pc_name[now_block]
                    hold_block = temp
        elif event.type == pygame.KEYUP:
            # If it is an arrow key, reset vector back to zero
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                x_move = 0
            elif event.key == pygame.K_DOWN :
                speed = speed_temp
    # Award 1 point per step while soft-dropping.
    if not speed == speed_temp :
        if TIME == 0 :
            if block_down == 1 :
                score += 1
    if gameover: # Game-over screen: wait until the window is closed.
        while gameover :
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    gameover = False
                    done = True
    if effect == True : # Sound effect when lines were cleared last frame.
        effect_sound.play()
        pygame.time.wait(100)
        effect = False
    #make a piece
    if make_piece == True :
        change = 0
        x = 3
        y = 2
        now_block = pre_block
        pre_block = random.randrange(0,7)
        now_piece = pc_name[now_block]
        make_piece = False
    display_board = copy.deepcopy(board)
    # Clamp the piece back inside the horizontal playfield bounds.
    for i in range(4) :
        if now_piece[change][i][0] + x > 9 :
            x -= 1
        elif now_piece[change][i][0] + x < 0 :
            x += 1
    # Mark the falling piece's cells in display_board with the marker value 7.
    for i in range(4) :
        display_board[now_piece[change][i][1] + y][now_piece[change][i][0] + x] = 7
    # Ghost piece: scan downward to find where the piece would land.
    gy = y
    ghostloop = True
    while ghostloop :
        for i in range(4) :
            if now_piece[change][i][1] + gy == 23 or board[now_piece[change][i][1] + gy + 1][now_piece[change][i][0] + x] < 7 :
                ghostloop = False
        if ghostloop :
            gy += 1
    if not gy == y :
        block_down = 1
    # Draw everything.
    screen.blit(back_img, [0,0])
    if score < 3000 :
        screen.blit(gradef, [333, 420])
    elif score < 10000 :
        screen.blit(gradec, [333, 420])
    else :
        screen.blit(gradea, [333, 420])
    for i in range(4) :
        screen.blit(ghostblock, [(BLOCK_SIZE) * (now_piece[change][i][0] + x) + 2, (BLOCK_SIZE) * (now_piece[change][i][1] + gy - 4) + 2 - 4])
    for row in range(4, 24) :
        for col in range(10) :
            color = BLACK  # NOTE(review): unused
            if display_board[row][col] < 7 :
                screen.blit(block_name[display_board[row][col]], [(BLOCK_SIZE) * col + 2, (BLOCK_SIZE) * (row - 4) + 2 - 4])
            elif display_board[row][col] == 7 :
                screen.blit(block_name[now_block], [(BLOCK_SIZE) * col + 2, (BLOCK_SIZE) * (row - 4 ) + 2 - 4])
    # Preview of the next piece.
    for i in range(4) :
        screen.blit(block_name[pre_block], [32 * pc_name[pre_block][0][i][0] + 360, 32 * (pc_name[pre_block][0][i][1] - 4) + 265])
    font = pygame.font.SysFont('Calibri', 48, True, False)
    text = font.render(str(score), False, WHITE)
    screen.blit(text, [350, 65])
    # Held piece, if any.
    if hold_block != 8 :
        for i in range(4) :
            screen.blit(block_name[hold_block],
                        [32 * pc_name[hold_block][0][i][0] + 360, 32 * (pc_name[hold_block][0][i][1] - 4) + 420])
    # Gravity tick: advance the piece every `speed` frames.
    if TIME < speed :
        TIME += 1
    else :
        TIME = 0
        y += block_down
    if MOVE_TIME == 1 : # Horizontal move, cancelled on wall/block collision.
        if x_move != 0:
            for i in range(4):
                if x_move == -1 :
                    if now_piece[change][i][0] + x == 0 :
                        x_move = 0
                    else: #elif now_piece[change][i][0] + x - 1 != 0:
                        if board[now_piece[change][i][1] + y][now_piece[change][i][0] + x - 1] < 7:
                            x_move = 0
                elif x_move == 1 :
                    if now_piece[change][i][0] + x == 9 :
                        x_move = 0
                    else : #elif now_piece[change][i][0] + x + 1 != 9 :
                        if board[now_piece[change][i][1] + y][now_piece[change][i][0] + x + 1] < 7:
                            x_move = 0
        x += x_move
        MOVE_TIME = 0
    # Landing: the piece rests on the floor or on another block.
    for i in range(4):
        if now_piece[change][i][1] + y == 23 or board[now_piece[change][i][1] + y + 1][now_piece[change][i][0] + x] < 7:
            block_wait = True
            block_down = 0
            if score > 9 :
                if x_move != 0 :
                    block_wait_time = 0
                    score -= 10
            if block_wait_time > 20 :
                # Lock the piece into the board; above row 4 means game over.
                for i in range(4):
                    if now_piece[change][i][1] + y < 4 :
                        gameover = True
                    board[now_piece[change][i][1] + y][now_piece[change][i][0] + x] = now_block
                make_piece = True
                tak_stroke.play()
                block_wait_time = 0
                block_down = 1
                block_wait = False
            break
    if block_wait : # Lock-delay timer (allows infinite-rotation resets).
        block_wait_time += 1
    messege_y = 0 # Row where the line-clear score message is drawn.
    # When a new piece is about to spawn, clear lines completed by the old one.
    if make_piece == True :
        delblocknumber = 0
        for i in range(4):
            if y + i < 24 :
                delthis = True
                for j in board[y + i]:
                    if j > 6:
                        delthis = False
                if delthis :
                    del board[y + i]
                    board = [[8, 8, 8, 8, 8, 8, 8, 8, 8, 8]] + board
                    delblocknumber += 1
                    pygame.draw.rect(screen,
                                     BLACK,
                                     [0,
                                      32 * (y + i - 4),
                                      322,
                                      32])
                    messege_y = i
                    effect = True
    font2 = pygame.font.SysFont('Calibri', 24, True, False)
    # Scoring per number of lines cleared this frame.
    # NOTE(review): delblocknumber is only assigned inside the make_piece
    # branch above -- verify it cannot be read before first assignment.
    if delblocknumber == 1 :
        score += 100
        screen.blit(font2.render("You get 100 point", False, WHITE), [128, 32 * (y + messege_y - 4)])
    elif delblocknumber == 2 :
        score += 300
        screen.blit(font2.render("You get 300 point", False, WHITE), [128, 32 * (y + messege_y - 4)])
    elif delblocknumber == 3 :
        score += 600
        screen.blit(font2.render("You get 600 point", False, WHITE), [128, 32 * (y + messege_y - 4)])
    elif delblocknumber == 4 :
        score += 1000
        screen.blit(font2.render("You get 1000 point", False, WHITE), [128, 32 * (y + messege_y - 4)])
    # Ending early is penalized; surviving long earns bonus points.
    if gameover == True :
        if gtimer < 100000 :
            score -= 50000
        elif gtimer < 300000 :
            score -= 10000
        elif gtimer < 500000 :
            score += 10000
        else :
            score += 50000
        pygame.mixer.pause()
        pygame.mixer.Sound("sound/gameover.ogg").play()
        screen.fill(BLUE)
        text = font.render("GAME OVER", False, WHITE)
        screen.blit(text, [160, 240])
        text = font.render("YOU GET :" + str(score), False, WHITE)
        screen.blit(text, [160, 280])
        if score < 3000:
            screen.blit(gradef, [180, 360])
        elif score < 10000:
            screen.blit(gradec, [180, 360])
        else:
            screen.blit(gradea, [180, 360])
    # Random Hanyangi event: success clears the board and awards points.
    if random.randrange(0,10000) == 1 :
        if hanyangi() :
            for i in range(20) :
                del board[23]
                board = [[8, 8, 8, 8, 8, 8, 8, 8, 8, 8]] + board
            score += 30000
            effect_sound.play()
            pygame.time.wait(50)
            effect_sound.play()
            pygame.time.wait(50)
            effect_sound.play()
    pygame.display.flip()
    clock.tick(60)
    gtimer += 1 # Track elapsed frames for the end-of-game bonus/penalty.
pygame.quit()
| UTF-8 | Python | false | false | 18,983 | py | 2 | play.py | 1 | 0.44238 | 0.394268 | 0 | 503 | 35.487078 | 142 |
naivelamb/leetcode | 12,412,455,500,119 | aa5f8029a0e40e9d7d4067794310f24cb9ea7183 | 7426f3cde2c93d65ffa76c904ba477d26becef8d | /1021_RemoveOutermostParentheses.py | c6468c6338825ef8662dcb08d85bf7d82883f3d8 | []
| no_license | https://github.com/naivelamb/leetcode | 5e8ac58f606e0419a10d8fd9c1f532b4f9814017 | bae36601c3ce67f2b4a91d331a2d70dca8a1ee92 | refs/heads/master | 2021-07-23T20:38:06.265839 | 2021-07-12T16:14:21 | 2021-07-12T16:14:21 | 166,906,784 | 20 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/remove-outermost-parentheses/
Find segment, remove the most outer parentheses
Time complexity: O(n), n -> len(S)
"""
class Solution:
def removeOuterParentheses(self, S: str) -> str:
res = []
start, curr, balance = 0, 1, 1
while curr < len(S):
if S[curr] == '(':
balance += 1
else:
balance -= 1
if balance == 0:
res.append(S[start+1:curr ])
start = curr + 1
curr += 1
return ''.join(res)
s = Solution()
print(s.removeOuterParentheses('(()())(())')) | UTF-8 | Python | false | false | 664 | py | 753 | 1021_RemoveOutermostParentheses.py | 752 | 0.49247 | 0.47741 | 0 | 26 | 24.576923 | 59 |
cokotracy/d4e-common-v13 | 13,640,816,171,932 | a6858663c04a899d2fb4f115810293cd1c4b0632 | 8be5f3b93581b378e88c21ac10f5556533decbaf | /d4e_project_chatter/__manifest__.py | 7a43fa534f9a34fcab3402a82fb099b481a11146 | []
| no_license | https://github.com/cokotracy/d4e-common-v13 | edb0cd1adb356d6a2ea1690cdbc9cc74c7be1aac | 2261cdfa3da1cdd7a713a48bee450689aafa89c8 | refs/heads/main | 2023-06-19T03:33:01.353148 | 2021-06-07T14:06:48 | 2021-06-07T14:06:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
{
"name": "D4E Project Chatter",
"summary": "Add a chatter to the project form view.",
"version": "13.0.1.0.0",
"category": "Project",
"website": "https://www.d4e.cool",
"license": "AGPL-3",
"application": True,
"installable": True,
"depends": ["base","project"],
'data': [
'views/project_views.xml',
],
}
| UTF-8 | Python | false | false | 399 | py | 93 | __manifest__.py | 47 | 0.508772 | 0.483709 | 0 | 16 | 23.9375 | 57 |
shituniao/ftTexPac | 11,656,541,251,953 | b88041facd5b495a408801ebc6effc4d7fd16346 | f225204c26f600903bf5a7a7c8358d1a6c9b070d | /ftTexPac.py | 440531399f78744888f6891863e7b5034a1cd162 | [
"MIT"
]
| permissive | https://github.com/shituniao/ftTexPac | 76a56df7cf29f97e16dbbf783835aff642d81212 | e8b52f6f7e73c96b602d8a8f577995e9be410c28 | refs/heads/master | 2021-01-21T07:39:09.281034 | 2014-12-03T09:09:04 | 2014-12-03T09:09:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys, os, commands
import PIL.Image as Image
def goThrough(rootDir):
ans = []
root, dirs, files = os.walk(rootDir).next()
for f in files:
ans.append((os.path.join(root, f), f))
return ans
if len(sys.argv) != 2:
print 'usage: ftTexPac.py [PATH]'
exit(0)
path = sys.argv[1]
if not os.path.isdir(path):
print 'error: ' + path + ' is not a path!'
exit(0)
piclist = goThrough(path)
picList = []
for picPath, name in piclist:
isPic = True
try:
im = Image.open(picPath)
except IOError:
isPic = False
if isPic:
picList.append((name, im))
def picCmp(picA, picB):
nameA, imA = picA
nameB, imB = picB
for i in range(2):
if imA.size[i] != imB.size[i]:
return cmp(imB.size[i], imA.size[i])
return cmp(nameA, nameB)
picList.sort(picCmp)
picN = len(picList)
for name, im in picList:
print "%-50s %5s %5dx%-5d" % (name, im.format, im.size[0], im.size[1]), im.mode
if picN == 0:
print 'there is no pic in ' + path + '!'
exit(0)
global pxH
global use
global minH
global minX
global minL
global width
global height
global locList
use = [0] * 1000
pxH = [0] * 4096
locList = [(0, 0)] * 1000
def getMinHXL():
global minH
global minX
global minL
minX = 0
minH = height + 1
minL = 0
for i in range(width):
if pxH[i] < minH:
minX = i
minH = pxH[i]
for i in range(minX, width):
if pxH[i] == minH:
minL += 1
else:
break
def place(index, x, y):
use[index] = 1
locList[index] = (x, y)
for i in range(picList[index][1].size[0]):
if x + i >= width:
break
pxH[x + i] += picList[index][1].size[1]
if pxH[x + i] > height:
return False
getMinHXL()
return True
def killGap(x):
fillHeight = 2000000
if x - 1 >= 0:
if pxH[x - 1] < fillHeight:
fillHeight = pxH[x - 1]
if x + minL < width:
if pxH[x + minL] < fillHeight:
fillHeight = pxH[x + minL]
for i in range(x, x + minL):
pxH[i] = fillHeight
getMinHXL()
def work():
global minH
global minX
global minL
global pxH
global use
use = [0] * 1000
pxH = [0] * 4096
cur = 0
getMinHXL()
while True:
if cur >= picN:
break
if (use[cur] == 0) and (minL >= picList[cur][1].size[0]):
tmp = place(cur, minX, minH)
if not tmp:
return False
else:
if use[cur] == 1:
cur += 1
continue
if (minL == width) or (minH > height):
return False
gapCanFill = False
for fill in range(cur + 1, picN):
if (use[fill] == 0) and (minL >= picList[fill][1].size[0]):
tmp = place(fill, minX, minH)
if not tmp:
return False
gapCanFill = True
break
if not gapCanFill:
killGap(minX)
continue
cur += 1
ss = 0
for i in range(picN):
ss += use[i]
if ss == picN:
return True
else:
return False
find = False
for i in range(5, 12):
t = 2**i
width = t
height = t
if work():
find = True
break
width = t / 2
height = t * 2
if work():
find = True
break
width = t * 2
height = t
if work():
find = True
break
width = t
height = t * 2
if work():
find = True
break
if find:
print 'find it!'
bgcolor = (255, 255, 255, 0)
outImage = Image.new('RGBA', (width, height), bgcolor)
for i in range(picN):
outImage.paste(picList[i][1], locList[i])
outName = os.path.split(path)[-1]
if len(outName) == 0:
outName = os.path.split(path)[-2]
if cmp(outName, '.') == 0:
outName = 'pwd'
outImage.save(outName + '.png')
#sip represent for Sub Image Pool
#it is a file format for Fountain game engine
outFile = open(outName + '.sip', 'w')
outFile.write('%d %d\n' % (width, height))
outFile.write('%d\n' % picN)
outInfo = []
for i in range(picN):
name, im = picList[i]
size = im.size
pos = locList[i]
outInfo.append('%s %d %d %d %d\n' % (name, size[0], size[1], pos[0], pos[1]))
outFile.writelines(outInfo)
outFile.close()
else:
print 'sorry, not find a solution.'
| UTF-8 | Python | false | false | 4,590 | py | 2 | ftTexPac.py | 1 | 0.505447 | 0.483878 | 0 | 200 | 21.95 | 85 |
webclinic017/django_bcoin | 18,107,582,139,072 | 01c341feb7b6f5ea0ca244c968535e074f01b531 | 3259378e5b155e0713254881b52579ef287e5742 | /welcome/migrations/0002_candle_quote_stock.py | 180a34040b4b455da360c321b40517f706ff525f | []
| no_license | https://github.com/webclinic017/django_bcoin | 88d3d7ccbe1c0161db24aa1531f302538ec2b5f9 | acc3cf77c443a9532f8dbf440dc37a5c5c042085 | refs/heads/master | 2023-08-11T19:57:13.936620 | 2021-09-15T16:27:54 | 2021-09-15T16:27:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2 on 2021-05-23 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('welcome', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Candle',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('dataLocation', models.TextField()),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=9)),
],
),
migrations.CreateModel(
name='Stock',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=9)),
('open', models.IntegerField()),
('close', models.IntegerField()),
('volume', models.IntegerField()),
('high', models.IntegerField()),
('low', models.IntegerField()),
],
),
]
| UTF-8 | Python | false | false | 1,372 | py | 22,232 | 0002_candle_quote_stock.py | 47 | 0.517493 | 0.500729 | 0 | 40 | 33.3 | 117 |
Andrea-Perin/Information_theory_computation | 19,284,403,183,490 | 5616dcd19724749d38a4ec28346834c1f7170c6c | 4d8cda0bcea8b56309a1a235a8e8f85a63df9ad2 | /10week/AndreaPerin_CODE/comp.py | 74e3f7fab087144c99902f4c0a331e293dbc9a55 | []
| no_license | https://github.com/Andrea-Perin/Information_theory_computation | 04a57802a92187bf09d0d2e136112a022d5b7e5a | ac38c51b60d1a038aa46e27b9cb677b96427fece | refs/heads/master | 2020-08-13T02:32:59.446261 | 2020-01-22T22:10:46 | 2020-01-22T22:10:46 | 214,890,619 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:22:47 2020
@author: andrea
"""
import os
import subprocess as sub
# parameters for the simulations
N_LAMBDA = 100
MAX_LAMBDA = 3
# lists of params to be used
lmbd = [str(MAX_LAMBDA*(i/N_LAMBDA)) for i in range(0, N_LAMBDA+1)]
# names of executables
exe = 'ex10_RG_101.x'
gnuscript = 'comp.gnu'
# launching stuff
with open('res_RG_101.dat', "w+") as f:
for lamb in lmbd:
sub.run(['./'+exe,lamb,str(2)], stdout=f)
sub.run(['gnuplot '+gnuscript], shell=True)
| UTF-8 | Python | false | false | 562 | py | 182 | comp.py | 26 | 0.635231 | 0.585409 | 0 | 27 | 19.518519 | 67 |
codss-ctrl/coding-test | 5,634,997,111,987 | 9b421e548cfecb7649e34a4caffa87d2e60cacd7 | d61e2d420342353855127f69ad20e09f4f049f31 | /Programmers/64064. 불량 사용자.py | 958d14df5e4bfca043528364540a0a2ee1ed2971 | []
| no_license | https://github.com/codss-ctrl/coding-test | 6d55bc35619ac114f2551b249258e6424b98de98 | f6497e2ded1804227c8970dc712c60bd58d1e127 | refs/heads/main | 2023-04-26T15:47:27.858277 | 2022-11-15T17:29:10 | 2022-11-15T17:29:10 | 368,121,238 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import permutations
import re
def solution(user_id, banned_id):
answer = []
banned_id = [re.sub('\*','.',b) for b in banned_id]
# ban = ' '.join(banned_id).replace('*','.')
for p in list(permutations(user_id,len(banned_id))):
tmp = []
for i in range(len(banned_id)):
f = re.match(banned_id[i],p[i])
if f and len(banned_id[i]) == len(p[i]):
tmp.append(f.group())
if len(tmp) == len(banned_id):
answer.append(sorted(p))
return set(list(map(tuple,answer))).__len__()
# return answer
user_id = ["frodo", "fradi", "crodo", "abc123", "frodoc"]
banned_id =["fr*d*", "abc1**"]
print(solution(user_id,banned_id)) | UTF-8 | Python | false | false | 720 | py | 194 | 64064. 불량 사용자.py | 193 | 0.55 | 0.544444 | 0 | 21 | 33.333333 | 57 |
maybar/DomoControl | 19,542 | 79c8264b8619fb39c7701a54d9809fe77af88328 | b478bab737f99c0b590035af96d4a700dc21122b | /Src/ps_test.py | a079c67642b22e4c0bc615a292d19b14963a355b | []
| no_license | https://github.com/maybar/DomoControl | 0c3b70171afbce955f36f219dd65cfcdc5b4a14a | aff85e9bcae34299a5e0d836062208cf391992f5 | refs/heads/master | 2022-03-11T10:06:22.262476 | 2022-01-23T20:58:10 | 2022-01-23T20:58:10 | 124,775,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import psutil
import sys
sys.path.append('./utils')
pid = 0
list_pid = psutil.pids()
for pid in list_pid:
try:
p = psutil.Process(pid)
if "python3" in p.name():
print('*' * 30)
print("Nombre:", p.name())
print("PID:", pid)
print("EXE:", p.exe())
print("CWD:", p.cwd())
print("CL:", p.cmdline())
print("ST:", p.status())
except (psutil.ZombieProcess, psutil.AccessDenied, psutil.NoSuchProcess):
pass
| UTF-8 | Python | false | false | 520 | py | 28 | ps_test.py | 21 | 0.507692 | 0.5 | 0 | 21 | 23.761905 | 77 |
singhnitink/tcl_scripts | 19,009,525,273,938 | cebd02b408db1198e5ce827302e7cb54f75c2f55 | 684e9a2ac809352194d02e2aaf568961fd0de626 | /tcl_scripts/peptide_system/sscache/one.py | c7d8f3cbdaf9b4ed2a4b0ec2ab4f68777d8894e5 | []
| no_license | https://github.com/singhnitink/tcl_scripts | f3bb0029e7f387c9b047e921768f5f99a94b4e12 | e20a930874eb1c6414be895c999889c1406785fe | refs/heads/main | 2023-07-29T07:40:46.823197 | 2021-09-09T10:27:48 | 2021-09-09T10:27:48 | 385,572,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''This file will be used to calculate the secondary stucture content
... generated from the sscache script'''
import pandas as pd
import numpy as np
data=np.genfromtxt('SEC_ST_FILE_SSCACHE.dat',dtype=None,encoding=None,delimiter='')
file1=open('PER_FRAME.dat','w') #contains helixaverage of 5ns 250 frames
data=np.char.array(data,unicode=True)
lines=data.shape[0]
nres=24 #number of residues in peptide chain
X=[] #creating an empty list to store fraction helix values
a=0
while a<lines:
b=1
helix=0
while b<25:
#25 => is number of residues+1
#a is line b is column
if data[a,b]=='H':
helix+=1
b+=1
#print(a,helix/nres)
X.append(helix/nres)
file1.write("%d\t%.2f\n"%(a,helix/nres))
a+=1
X.append(helix/nres)
file1.write("%d\t%.2f\n"%(a,helix/nres))
file1.close()
#******* averaging the data stored in list X
t1=STARTTIME #start time
interval=INTERVAL #interval
##**Change the value of interval for changing the time period of mean##
file2=open('AVERAGE_DATA.dat','w') #contains helixaverage every 5ns ie. 250 frames
t2=t1+interval
m=0
mean_int=(1000//100)*interval
#40 is time in ps at which you are writing the dcd file
n=mean_int
file2.write('#t1\tt2\t run \tmean\t std \t median\n')
while n<=len(X):
mean=np.mean(X[m:n])
std=np.std(X[m:n])
median=np.median(X[m:n])
file2.write('%d\t%d\t RUN \t%.2f\t%.4f\t%.4f\n'%(t1,t2,mean,std,median))
##print(m,n)
m+=mean_int
n+=mean_int
t1+=interval
t2+=interval
file2.close()
print(len(X))
print('DONE')
| UTF-8 | Python | false | false | 1,609 | py | 28 | one.py | 12 | 0.642014 | 0.608452 | 0 | 51 | 29.54902 | 83 |
dedekinds/Single-connected-region-orthogonal-mesh-generation | 8,091,718,416,075 | 336143069093d1f97d220bdfb617672e9606ad84 | d3462caf07c96637c230766ddc8bfe197333fc86 | /train_lightgbm.py | 63b251feb32d265963ba693be14000f04b5aaa11 | [
"MIT"
]
| permissive | https://github.com/dedekinds/Single-connected-region-orthogonal-mesh-generation | 24e263872b4ac52e577f178e37e868f44a51f773 | 09431363c28273fd273c73a5b76664347d151a3a | refs/heads/master | 2018-10-22T09:50:51.377761 | 2018-10-12T06:57:13 | 2018-10-12T06:57:13 | 141,402,421 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 11:51:18 2018
@author: dedekinds
"""
# coding: utf-8
# pylint: disable = invalid-name, C0111
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import joblib
import os
import pygridgen
import matplotlib.pyplot as plt
import random
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--adj_num", nargs='?', type=int, help=" argument", default=20)
parser.add_argument("--From", nargs='?', type=int, help=" argument", default=30)
parser.add_argument("--To", nargs='?', type=int, help=" argument", default=40)
args = parser.parse_args()
loss_limit = 2.00
adjacency_num = args.adj_num
FROM = args.From
TO = args.To
os.chdir('./shp/train_data_'+str(FROM)+'_to_'+str(TO))
#os.chdir('./train_data')
lst = os.listdir(os.getcwd())
filename = []
for c in lst:
if os.path.isfile(c) and c.endswith('.csv'):# and c.find("test") == -1
filename.append(c)
def judge_inner_point(nvert, vertx, verty, testx, testy):
#PNPoly algorithm (judge whether a point is in a given polygon)
#nvert : the number of the polygon's vertex
#vertx(y) : coordinate of the polygon
#testx(y) : coordinate of the test point
i, j ,c = 0,nvert-1,False
for i in range(nvert):
P1 = ((verty[i]>testy) != (verty[j]>testy))
P2 = (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) /(verty[j]-verty[i]+0.0000000001) + vertx[i])
if P1 & P2:
c = not c
j = i
#print(P1,P2,c)
return c
def adjacency(lat, lon, len_num):
#return (train) length & angle
length = []
angle = []
#length
for i in range(len_num-1):
LAT = (lat[i]-lat[i+1])**2
LON = (lon[i]-lon[i+1])**2
length.append(np.sqrt(LAT + LON) )
LAT = (lat[0]-lat[-1])**2
LON = (lon[0]-lon[-1])**2
length.append(np.sqrt(LAT + LON))
length = (np.array(length)-np.mean(length)) / (np.std(length)+0.00000001)
#angle
for i in range(len_num-1):
v1 = (lat[i+1] - lat[i] , lon[i+1] - lon[i])
v2 = (lat[i-1] - lat[i] , lon[i-1] - lon[i])
inner = v1[0]*v2[0] + v1[1]*v2[1]
a1 = np.sqrt(v1[0]**2 + v1[1]**2)
a2 = np.sqrt(v2[0]**2 + v2[1]**2)
if judge_inner_point(len_num, lat, lon, (lat[i+1]+lat[i-1])/2, (lon[i+1]+lon[i-1])/2 ):
angle.append(np.arccos(inner/(a1*a2))/np.pi*180 )
else:
angle.append(360-np.arccos(inner/(a1*a2))/np.pi*180)
v1 = (lat[0] - lat[-1] , lon[0] - lon[-1])
v2 = (lat[-2] - lat[-1] , lon[-2] - lon[-1])
inner = v1[0]*v2[0] + v1[1]*v2[1]
a1 = np.sqrt(v1[0]**2 + v1[1]**2)
a2 = np.sqrt(v2[0]**2 + v2[1]**2)
if judge_inner_point(len_num, lat, lon, (lat[0]+lat[-2])/2, (lon[0]+lon[-2])/2 ):
angle.append(np.arccos(inner/(a1*a2))/np.pi*180 )
else:
angle.append(360-np.arccos(inner/(a1*a2))/np.pi*180)
angle = np.array(angle)/360
return angle,length
Input = []
Output = []
num = -1
for file in filename:
num += 1
f = open(file)
df = pd.read_csv(f)
data = df.values[:,1:]
len_num = int(data[0][-1])
temp = data[0][:2*len_num].reshape(-1,2)
lat = temp[:,0]
lon = temp[:,1]
corner = data[0][2*len_num:-2]
#length and angle
file_angle ,file_length = adjacency(lat, lon, len_num)
for i in range(len_num):
#length + angle --> input
#corner --> output
temp_input = []
temp_output = []
# #input length
for j in range(int(adjacency_num/2)):
temp_input.append(file_length[(i+j)%len_num])
temp_input.append(file_length[(i-j-1)%len_num])
#input angle
temp_input.append(file_angle[i])
for j in range(int((adjacency_num-3)/2)):
temp_input.append(file_angle[(i+j+1)%len_num])
temp_input.append(file_angle[(i-j-1)%len_num])
temp_output.append(corner[i])
Input.append(temp_input)
Output.append(temp_output)
for i in range(len_num):
if corner[i] == 1:
#length + angle --> input
#corner --> output
temp_input = []
temp_output = []
#input length -1
for j in range(int(adjacency_num/2)):
temp_input.append(file_length[(i+j)%len_num])
temp_input.append(file_length[(i-j-1)%len_num])
#input angle -1
temp_input.append(1-file_angle[i])
for j in range(int((adjacency_num-3)/2)):
temp_input.append(1-file_angle[(i+j+1)%len_num])
temp_input.append(1-file_angle[(i-j-1)%len_num])
temp_output.append(-1)
Input.append(temp_input)
Output.append(temp_output)
else:
continue
#ANN train________________________________________________________________________________________
temp_total = np.column_stack((np.array(Input),np.array(Output)))
#
total = temp_total[0]
for i in range(1,len(temp_total)):#len(temp_total)
if temp_total[i][-1] in [1,-1,0]:
total = np.row_stack((total,temp_total[i]))
continue
total = np.delete(total, 0, 0)
random.shuffle(total)
TEMP_X = total[:, :-1].astype(float)
TEMP_y = total[:, -1]+1
rat = 0.8
rat_num = int(len(total)*rat)
X = TEMP_X[:rat_num,:]
y = TEMP_y[:rat_num]
TEST_X = TEMP_X[rat_num:,:]
TEST_y = TEMP_y[rat_num:]
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)
params = {
'task' : 'train',
'boosting_type' : 'gbdt',
'objective' : 'multiclass',
'metric' : {'multi_logloss'},
'num_leaves' : 63,
'learning_rate' : 0.01,
'feature_fraction' : 0.9,
'bagging_fraction' : 0.9,
'bagging_freq': 0,
'verbose' : 1,
'num_class' : 3
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
gbm = lgb.train(params,
lgb_train,
num_boost_round=2000,
valid_sets=lgb_eval,
early_stopping_rounds=5)
#
preds = gbm.predict(TEST_X)
predictions = []
for x in preds:
predictions.append(np.argmax(x))
temp = np.array(predictions)-TEST_y
print(np.sum(temp == 0)/len(temp))
#
print('Save model...')
# save model to file
#gbm.save_model('model.txt')
# save model
joblib.dump(gbm, 'lgb_model_'+str(TO)+'corenr_'+str(adjacency_num)+'adj.pkl')
## load model
#gbm_pickle = joblib.load('lgb_model.pkl')
#
#print('Start predicting...')
## predict
#y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
## eval
#print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
#
#import matplotlib.pyplot as plt
#
#for i in range(len(Input[0])):
# temp = []
# for inp in Input:
# temp.append(inp[i])
# plt.hist(temp,500,normed=True)
# plt.savefig('./a/angle15/angle_15_'+str(i)+".png")
# plt.show()
#
#
#
#for i in range(len(Input[0])):
# temp = []
# for inp in Input:
# temp.append(inp[i])
# plt.hist(temp,500)
# plt.savefig('./a/length10/length_10_'+str(i)+".png")
# plt.show() | UTF-8 | Python | false | false | 7,538 | py | 5,423 | train_lightgbm.py | 12 | 0.542054 | 0.508358 | 0 | 295 | 24.555932 | 106 |
moreC/lcode | 14,791,867,386,282 | 2e38dedc6f79531cb575291d3bb544df4b61080a | 70bc2801f246ea0bcf565f9a0d7f4eacbdfd92ee | /net/examples/xception.py | c265dabd2259ce4954b3430023d3ff200a33e191 | []
| no_license | https://github.com/moreC/lcode | 0cda6143ab00477e62802e67fb92182bb23aec26 | 88744e5bb11cefd20df49dc547c2d0344be2e824 | refs/heads/master | 2018-09-28T02:16:25.205767 | 2018-08-29T02:56:51 | 2018-08-29T02:56:51 | 82,276,208 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import caffe
from caffe import layers as L
from caffe import params as P
def _conv(net, bottom, num_out, ks, stride, pad, group=1, depthwise=False):
layer = L.Convolution(net[bottom],
num_output = num_out,
kernel_size = ks,
stride = stride,
pad = pad,
bias_term = False,
group = group,
weight_filler = dict(type='xavier'),)
if depthwise:
layer.type = 'DepthwiseConvlution'
return layer
def conv_bn_sc_relu(net, bottom, block, num_out, ks, stride, pad):
net[block] = _conv(net, bottom, num_out, ks, stride, pad)
net[block+'_bn'] = L.BatchNorm(net[block], eps=0.001, in_place=True, param=[dict(lr_mult=0)] * 3)
net[block+'_scale'] = L.Scale(net[block+'_bn'], bias_term=True, in_place=True)
net[block+'_relu'] = L.ReLU(net[block+'_scale'], in_place=True)
return net
def _xception_module(net, bottom, block, in_num, out_num, stride, relu=True):
if relu:
net[block+'_relu'] = L.ReLU(net[bottom])
net[block+'_conv1_1'] = _conv(net, block+'_relu', in_num, 3, 1, 1, group=in_num, depthwise=True)
else:
net[block+'_conv1_1'] = _conv(net, bottom, in_num, 3, 1, 1, group=in_num, depthwise=True)
net = conv_bn_sc_relu(net, block+'_conv1_1', block+'_conv1_2', out_num, 1, 1, 0)
net[block+'_conv2_1'] = _conv(net, block+'_conv1_2', out_num, 3, 1, 1, group=out_num, depthwise=True)
net = conv_bn_sc_relu(net, block+'_conv2_1', block+'_conv2_2', out_num, 1, 1, 0)
if stride == 2:
net = conv_bn_sc_relu(net, bottom, block+'_match_conv', out_num, 1, stride, 0)
net[block+'_pool'] = L.Pooling(net[block+'_conv2_2'], kernel_size=3, stride=2, pool=P.Pooling.MAX)
net[block+'_eltwise'] = L.Eltwise(net[block+'_match_conv'], net[block+'_pool'])
elif stride == 1:
net[block+'_conv3_1'] = _conv(net, block+'_conv2_2', out_num, 3, 1, 1, group=out_num)
net = conv_bn_sc_relu(net, block+'_conv3_1', block+'_conv3_2', out_num, 1, 1, 0)
net[block+'_eltwise'] = L.Eltwise(net[bottom], net[block+'_conv3_2'])
else:
raise
return net
def get_net(data, label, num_class):
net = caffe.NetSpec()
net['data'] = data
net = conv_bn_sc_relu(net, 'data', 'conv1', 24, 3, 2, 1)
net['pool1'] = L.Pooling(net['conv1'], kernel_size=3, stride=2, pool=P.Pooling.MAX)
net = conv_bn_sc_relu(net, 'pool1', 'conv2', 72, 3, 1, 1)
stage_lengths = [4, 8, 4]
stage_f_size = [144, 288, 576]
n = 0
bottom = 'conv2'
for k, stage in enumerate(stage_lengths):
for i in range(stage):
relu = True if n != 0 else False
stride = 2 if i == 0 else 1
in_num = stage_f_size[k] / 2 if i == 0 else stage_f_size[k]
out_num = stage_f_size[k]
net = _xception_module(net, bottom, 'xception{}'.format(n+1), in_num, out_num, stride, relu)
bottom = "xception{}_eltwise".format(n+1)
n += 1
net['gap'] = L.Pooling(net[bottom], pool=P.Pooling.AVE, global_pooling=True)
net['fc'] = L.InnerProduct(net['gap'],
num_output = num_class,
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'))
return net
| UTF-8 | Python | false | false | 3,342 | py | 60 | xception.py | 58 | 0.559844 | 0.529324 | 0 | 79 | 41.291139 | 106 |
collective/collective.webservice | 15,109,694,986,302 | c27776a908783697846f567dc673073afb51603b | 8adb811e18408aea9258db1696e46a3bc21e5847 | /src/collective/webservice/__init__.py | 7ed893b3f7f1e7bd5265ac503b9615fbf67e072d | []
| no_license | https://github.com/collective/collective.webservice | 2b39f2480b6538dfd350416be4f1689c157b2c8a | 269cc42aa34c01201de738bbcfcdd33c232aa078 | refs/heads/master | 2023-03-22T14:39:18.918801 | 2018-07-27T12:28:50 | 2018-07-27T12:28:50 | 5,853,122 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from AccessControl import allow_class
from AccessControl import allow_module
from AccessControl import ModuleSecurityInfo
from zope.i18nmessageid import MessageFactory
# Define a message factory for when this product is internationalised.
# This will be imported with the special name "_" in most modules. Strings
# like _(u"message") will then be extracted by i18n tools for translation.
WebserviceMessageFactory = MessageFactory('collective.webservice')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
tipos = """
voidType
stringType
untypedType
IDType
NCNameType
NameType
ENTITYType
IDREFType
languageType
NMTOKENType
QNameType
tokenType
normalizedStringType
CDATAType
booleanType
decimalType
floatType
doubleType
durationType
timeDurationType
dateTimeType
recurringInstantType
timeInstantType
timePeriodType
timeType
dateType
gYearMonthType
gYearType
centuryType
yearType
gMonthDayType
recurringDateType
gMonthType
monthType
gDayType
recurringDayType
hexBinaryType
base64BinaryType
base64Type
binaryType
anyURIType
uriType
uriReferenceType
NOTATIONType
ENTITIESType
IDREFSType
NMTOKENSType
integerType
nonPositiveIntegerType
non_Positive_IntegerType
negativeIntegerType
negative_IntegerType
longType
intType
shortType
byteType
nonNegativeIntegerType
non_Negative_IntegerType
unsignedLongType
unsignedIntType
unsignedShortType
unsignedByteType
positiveIntegerType
positive_IntegerType
compoundType
structType
headerType
bodyType
arrayType
typedArrayType
"""
tipos = [t.strip() for t in tipos.split('\n') if t.strip()]
product_globals = globals()
for t in tipos:
dotted_name = 'SOAPpy.Types.' + t
parts = dotted_name.split('.')
m_name = '.'.join(parts[:-1])
k_name = parts[-1]
ModuleSecurityInfo(m_name).declarePublic(t)
module = __import__(m_name, product_globals, locals(), [k_name])
klass = getattr(module, k_name)
allow_class(klass)
allow_module('xml.parsers.expat')
ModuleSecurityInfo('App.Common').declarePublic('rfc1123_date')
| UTF-8 | Python | false | false | 2,660 | py | 17 | __init__.py | 10 | 0.619173 | 0.613158 | 0 | 107 | 23.859813 | 74 |
Eyadkht/photo-sharing-app | 4,209,067,996,968 | d4d1ef58847c9f2d7be1280d6083f69792aef42a | 2b21c9acf126c07bba1d22f8f43c4c48f134530c | /config/wsgi.py | c29490c9d37b0adbbb0def3a792205ec00569e6e | []
| no_license | https://github.com/Eyadkht/photo-sharing-app | cdfeaae8b01b1c558009d5477f132b4ace052a43 | 5c77b0700a5e862f2b98d734476590a72c882201 | refs/heads/master | 2023-02-21T16:30:40.304678 | 2021-01-21T18:30:37 | 2021-01-21T18:30:37 | 226,185,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
WSGI config for photo_sharing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
if os.getenv('GAE_APPLICATION', None):
value = os.getenv('GAE_APPLICATION', None)
if value == "g~photosharingapp-261121":
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')
elif value == "g~photosharingapp-staging":
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.staging')
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.development')
application = get_wsgi_application() | UTF-8 | Python | false | false | 794 | py | 40 | wsgi.py | 27 | 0.709068 | 0.698992 | 0 | 25 | 30.8 | 89 |
angelmacwan/Python | 1,408,749,322,910 | 5de037cbd0b06b77335b5a8de28291db7a274802 | 15b08827e446c80e22eb79ac638446fde564a369 | /sortingViz.py | 3965308d39984e818c639077d11d54025cd0caf8 | []
| no_license | https://github.com/angelmacwan/Python | 2d1c90d92a8898206596e593963041c5fd5af4a1 | d20be74e1e1b1d7300320499477b4974fb5b3734 | refs/heads/main | 2023-03-24T04:18:30.553535 | 2021-03-23T17:03:47 | 2021-03-23T17:03:47 | 326,181,992 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
import random
n = 100
speed = 60
WIDTH, HEIGHT = 600, 300
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
run = True
arr = []
for i in range(n):
arr.append([random.randint(0, HEIGHT), 0])
cur = -1
comparisions = 0
operations = 0
while run:
width = WIDTH/n
x = 0
WIN.fill((255,255,255))
print(f'Comparisions => {comparisions}, Operations => {operations}')
for e in pygame.event.get():
if(e.type == pygame.QUIT):
run = False
for a in range(len(arr)):
if(arr[a][1] == 0):
pygame.draw.rect(WIN, (0, 255, 255), (x, 0, width, arr[a][0]))
elif(arr[a][1] == 1):
pygame.draw.rect(WIN, (0, 255, 0), (x, 0, width, arr[a][0]))
arr[a][1] = 0
elif(arr[a][1] == 2):
pygame.draw.rect(WIN, (255, 0, 0), (x, 0, width, arr[a][0]))
arr[a][1] = 0
x += width
pygame.time.delay(100)
for i in range(speed):
comparisions+=1
if(arr[cur][0] < arr[cur+1][0]):
arr[cur+1][0], arr[cur][0] = arr[cur][0], arr[cur+1][0]
arr[cur][1], arr[cur+1][1] = 1,2
cur = -1
operations += 1
if(cur < len(arr)-2):
cur += 1
pygame.display.update()
pygame.quit()
| UTF-8 | Python | false | false | 1,343 | py | 6 | sortingViz.py | 5 | 0.473567 | 0.413254 | 0 | 61 | 20.016393 | 74 |
Pravanakotha/python-git-project | 18,339,510,358,734 | 4267c27dcdb1be3f00ccb5efd10f67c7f3480eee | 728ea30cf74220606824985b6955ad1e039531e9 | /main.py | 6fa524660b80164b5de9090634a612e2e03c93a8 | []
| no_license | https://github.com/Pravanakotha/python-git-project | 659680a7e361beb2fe3544804c3cf17f99e4496b | 90d72660b2889274168a701395df447e5bced455 | refs/heads/main | 2023-08-23T10:39:20.123427 | 2021-11-08T11:06:31 | 2021-11-08T11:06:31 | 425,770,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | num = int(input("Enter number to check:"))
sum = 0
temp = num
while num>0:
rem = num%10
sum = sum * 10 +rem
num = num // 10
if temp==sum:
print("Palindrome")
else:
print("Not a palindrome") | UTF-8 | Python | false | false | 219 | py | 2 | main.py | 2 | 0.561644 | 0.525114 | 0 | 11 | 18.090909 | 42 |
AdamZhouSE/pythonHomework | 4,604,204,942,137 | cc5710785ce6a6c9e01f9c8c1d6b8aca345b1d4c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2739/60825/244338.py | 19ce6838aaf89b62e9ee1379119483a96dea5af7 | []
| no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | res=[]
def printAns(currList, currSum, target, k):
if currSum==target and currList.size()==k:
res.append(currList)
return
elif currSum>target||currList.size()>k:
return
else:
for i in range(currList[len(currList)-1], 9):
t=currList[:]
t.append(i)
printAns(t, currSum+i, target, k)
s=input()
k=int(s[0])
target=int(s[3:])
printAns([], 0, target, k)
print(res) | UTF-8 | Python | false | false | 442 | py | 45,079 | 244338.py | 43,489 | 0.565611 | 0.554299 | 0 | 21 | 20.095238 | 53 |
jay9989/DiscordBots | 5,274,219,886,026 | cb1e22a5f2023bf7442a1ade210e259260a9b2fa | 5a1b19c35ba6b6032336522ad72441d6a7f96d42 | /RollBot.py | 24d169c6e83b2b96c2de65fa5a0a3393cbebe8f5 | []
| no_license | https://github.com/jay9989/DiscordBots | 08de952f634aab72e1acd4697d3db01bf5d8667a | 030be4e6f2358e3c1ffe7135248f22828b3c46cc | refs/heads/master | 2022-07-20T00:13:01.752041 | 2020-05-21T02:48:48 | 2020-05-21T02:48:48 | 265,735,312 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import string
import os
import discord
from discord.ext import commands
# Bot client; all commands below are invoked with the "/" prefix.
client = commands.Bot(command_prefix='/')

# Names of users who have already rolled (prevents re-rolls).
# Fixed: the original `global NAMES` statement was removed — `global` is a
# meaningless no-op at module scope.
NAMES = []
@client.event
async def on_ready():
    # Fired once the bot has connected to Discord and finished its initial setup.
    print('RollBot is ready!')
global roll_channel  # NOTE(review): no-op at module scope; `roll` below assigns its own *local* roll_channel
@client.command()
async def roll(ctx, max: int):
    """Roll a random number from 1 to `max`; each user may roll only once.

    The parameter is deliberately named `max` (shadowing the builtin):
    discord.py shows parameter names in the generated command help.
    """
    # Admin channel that receives the "user rolled # out of #" announcements.
    roll_channel = client.get_channel(700426150329057340)
    rolled = random.randint(1, max)
    # Users are tracked by display name — NOTE(review): names are not unique
    # across Discord; tracking by ctx.message.author.id would be safer.
    if ctx.message.author.name not in NAMES:
        await roll_channel.send(f"{ctx.message.author.mention} has rolled a " + str(rolled) + " out of " + str(max))
        # Remember the user so they cannot reroll.
        NAMES.append(ctx.message.author.name)
        print(NAMES)  # debugging aid
        # Bump the persistent roll counter ("for the fun of it").
        # Fixed: the original leaked the read handle by re-binding `counter`
        # to a new open() without closing; `with` closes both deterministically.
        with open("counter.txt", "r") as counter:
            iC = int(counter.readline())
        iC += 1
        with open("counter.txt", "w") as counter:
            counter.write(str(iC))
    else:
        # The user already rolled; tell them privately.
        await ctx.message.author.send(
            "Sorry, but you've already rolled in the Togethearn raffle. If there has been a mistake, please contact the Togethearn staff.")
@client.command()
async def rem(ctx, name: str):
    """Remove a user's roll entry; restricted to holders of the RBAs role.

    `name` is a Discord mention such as "<@123456789>"; the markup characters
    are stripped to recover the numeric user ID.
    """
    # Drop every '<', '>' and '@' (same effect as the original chained replaces).
    user_id = name.translate({ord(ch): None for ch in "<>@"})
    # Role required to administer the raffle.
    admin_role = discord.utils.get(ctx.guild.roles, name="RBAs")
    # Resolve the ID back to the account's username.
    username = client.get_user(int(user_id)).name
    if admin_role not in ctx.author.roles:
        return
    if username in NAMES:
        NAMES.remove(username)
        print(NAMES)  # debugging aid
    else:
        await ctx.author.send("The user " + username + " has not yet rolled.")
@client.command()
async def list(ctx):
    """Post the current roll list to the invoking channel; RBAs-only.

    NOTE: the command name shadows the builtin `list` in this module.
    """
    admin_role = discord.utils.get(ctx.guild.roles, name="RBAs")
    if admin_role not in ctx.author.roles:
        return
    await ctx.send(NAMES)
@client.command()
async def clear(ctx):
    """Empty the roll list; RBAs-only. Confirms via DM to the invoker."""
    admin_role = discord.utils.get(ctx.guild.roles, name="RBAs")
    if admin_role not in ctx.author.roles:
        return
    NAMES.clear()
    await ctx.author.send("The list has been cleared.")
client.run(os.environ['TOKEN'])  # bot token is supplied via the TOKEN environment variable
| UTF-8 | Python | false | false | 2,851 | py | 2 | RollBot.py | 1 | 0.628551 | 0.621186 | 0 | 106 | 25.896226 | 139 |
fabyom/PageParser | 3,513,283,269,544 | d03849257c8ff63f7adf30e7e852d8da41da67cd | 68139c834a1370a68c33b7448c0127d59f0e6c57 | /Parser_OCR/AltoParser.py | b124c28134392bb1837a6b05532fe7344b712ff9 | []
| no_license | https://github.com/fabyom/PageParser | 35e78d29d7ac7e3459ac22da2f3e98ef76f875cf | 9d8189e71fd2ae3c1c2d70a88688f971a578a564 | refs/heads/master | 2022-04-27T21:30:31.574129 | 2020-04-19T22:13:11 | 2020-04-19T22:13:11 | 259,223,974 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys, os
# Make the project root (two levels up) importable for the absolute import below.
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from lxml import etree
from Parser_WiTTFind.Parser_OCR.Alto import AltoDocument, AltoPage, AltoLine
def ns(tag):
    """Prefix *tag* with the ALTO v2 XML namespace (Clark notation)."""
    return '{http://www.loc.gov/standards/alto/ns-v2#}%s' % tag
class AltoParser:
    """
    Parser to read a set of ALTO XML documents into a Python object graph
    containing the coordinates and content of each analysed text line.
    """

    def __init__(self, alto_path, document_id):
        self.document = AltoDocument(document_id)
        self.path = alto_path
        # NOTE(review): os.listdir order is arbitrary; sort if page order matters.
        self.files = os.listdir(alto_path)

    def read_files(self):
        """
        Read the files in the alto_path of the parser and build the object graph from them.
        """
        for filename in self.files:
            # os.path.join instead of manual '/' concatenation (portable).
            full_path = os.path.join(self.path, filename)
            tree = etree.parse(full_path)
            # The page id is the file name without its extension.
            self.parse_page(os.path.splitext(filename)[0], tree)

    def parse_page(self, page_id, tree):
        """
        Read ALTO XML of a single page.
        Add the result to the page collection / document.
        :param page_id: Full name of the page, such as 'Ms-114,1r'.
        :param tree: ElementTree read from an ALTO XML file by lxml.
        """
        page = AltoPage(page_id)
        for text_line in tree.findall(".//{}".format(ns("TextLine"))):
            page.add_line(self.parse_line(text_line))
        self.document.add_page(page)

    @staticmethod
    def parse_line(text_line):
        """
        Parse a line from an ALTO document.
        :param text_line: An lxml TextLine element.
        :return: The line as an AltoLine object.
        """
        # The first child element carries the recognised text content.
        content = text_line[0].get("CONTENT")
        line_id = text_line.get("ID")
        hpos = text_line.get("HPOS")
        vpos = text_line.get("VPOS")
        height = text_line.get("HEIGHT")
        width = text_line.get("WIDTH")
        if len(text_line) > 1:
            print("WARNING: TextLine with more than one child does occur:", text_line[1].tag)
        return AltoLine(line_id, content, hpos, vpos, height, width)
| UTF-8 | Python | false | false | 2,172 | py | 14 | AltoParser.py | 14 | 0.601289 | 0.597145 | 0 | 66 | 31.909091 | 129 |
CenterForOpenScience/osf.io | 16,209,206,586,613 | fb306ab4c8aac5067b813e62cbe9336095cb6920 | 3c41443364da8b44c74dce08ef94a1acd1b66b3e | /addons/github/apps.py | d397c060ed50cbb039f12e3a07a33fbe2daf1134 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
]
| permissive | https://github.com/CenterForOpenScience/osf.io | 71d9540be7989f7118a33e15bc4a6ce2d2492ac1 | a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e | refs/heads/develop | 2023-09-04T03:21:14.970917 | 2023-08-31T14:49:20 | 2023-08-31T14:49:20 | 10,199,599 | 683 | 390 | Apache-2.0 | false | 2023-09-14T17:07:52 | 2013-05-21T15:53:37 | 2023-09-02T22:35:24 | 2023-09-14T17:07:51 | 198,926 | 645 | 318 | 155 | Python | false | false | import logging
import os
from addons.base.apps import BaseAddonAppConfig
from addons.github.api import GitHubClient, ref_to_params
from addons.github.exceptions import NotFoundError, GitHubError
from addons.github.settings import MAX_UPLOAD_SIZE
from addons.github.utils import get_refs, check_permissions
from website.util import rubeus
logger = logging.getLogger(__name__)
# Silence overly chatty third-party libraries used by this addon.
logging.getLogger('github3').setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
def github_hgrid_data(node_settings, auth, **kwargs):
    """Build the HGrid (file-browser) root data for a node's linked GitHub repo.

    Returns None (or falls through with no value) when no repo is linked,
    the repo cannot be accessed, or a private repo is viewed by a
    non-contributor; otherwise returns a one-element list containing the
    addon-root dict built by rubeus.
    """
    # Quit if no repo linked
    if not node_settings.complete:
        return
    connection = GitHubClient(external_account=node_settings.external_account)
    # Initialize repo here in the event that it is set in the privacy check
    # below. This potentially saves an API call in _check_permissions, below.
    repo = None
    # Quit if privacy mismatch and not contributor
    node = node_settings.owner
    if node.is_public and not node.is_contributor_or_group_member(auth.user):
        try:
            repo = connection.repo(node_settings.user, node_settings.repo)
        except NotFoundError:
            logger.error('Could not access GitHub repo')
            return None
        except GitHubError:
            return
        if repo.private:
            # Private repo viewed by an anonymous/non-member user: hide it.
            return None
    try:
        branch, sha, branches = get_refs(
            node_settings,
            branch=kwargs.get('branch'),
            sha=kwargs.get('sha'),
            connection=connection,
        )
    except (NotFoundError, GitHubError):
        # TODO: Show an alert or change GitHub configuration?
        logger.error('GitHub repo not found')
        return
    if branch is not None:
        ref = ref_to_params(branch, sha)
        can_edit = check_permissions(
            node_settings, auth, connection, branch, sha, repo=repo,
        )
    else:
        # No branch resolved: browsing only, no ref in the URLs below.
        ref = None
        can_edit = False
    name_tpl = '{user}/{repo}'.format(
        user=node_settings.user, repo=node_settings.repo
    )
    permissions = {
        'edit': can_edit,
        'view': True,
        'private': node_settings.is_private
    }
    urls = {
        'upload': node_settings.owner.api_url + 'github/file/' + (ref or ''),
        'fetch': node_settings.owner.api_url + 'github/hgrid/' + (ref or ''),
        'branch': node_settings.owner.api_url + 'github/hgrid/root/',
        'zip': node_settings.owner.api_url + 'github/zipball/' + (ref or ''),
        'repo': 'https://github.com/{0}/{1}/tree/{2}'.format(node_settings.user, node_settings.repo, branch)
    }
    branch_names = [each.name for each in branches]
    if not branch_names:
        branch_names = [branch]  # if repo un-init-ed then still add default branch to list of branches
    return [rubeus.build_addon_root(
        node_settings,
        name_tpl,
        urls=urls,
        permissions=permissions,
        branches=branch_names,
        defaultBranch=branch,
        private_key=kwargs.get('view_only', None),
    )]
# Absolute path of this addon package directory.
HERE = os.path.dirname(os.path.abspath(__file__))
# Mako template rendered for the per-node GitHub settings UI.
NODE_SETTINGS_TEMPLATE = os.path.join(
    HERE,
    'templates',
    'github_node_settings.mako',
)
class GitHubAddonConfig(BaseAddonAppConfig):
    """Django app config declaring the GitHub storage addon for the OSF."""
    name = 'addons.github'
    label = 'addons_github'
    full_name = 'GitHub'
    short_name = 'github'
    configs = ['accounts', 'node']
    categories = ['storage']
    owners = ['user', 'node']
    has_hgrid_files = True
    max_file_size = MAX_UPLOAD_SIZE
    node_settings_template = NODE_SETTINGS_TEMPLATE
    @property
    def get_hgrid_data(self):
        # Module-level function above builds the file-browser root data.
        return github_hgrid_data
    # Log-action identifiers recorded on file/auth events for this addon.
    FILE_ADDED = 'github_file_added'
    FILE_REMOVED = 'github_file_removed'
    FILE_UPDATED = 'github_file_updated'
    FOLDER_CREATED = 'github_folder_created'
    NODE_AUTHORIZED = 'github_node_authorized'
    NODE_DEAUTHORIZED = 'github_node_deauthorized'
    NODE_DEAUTHORIZED_NO_USER = 'github_node_deauthorized_no_user'
    REPO_LINKED = 'github_repo_linked'
    actions = (
        FILE_ADDED,
        FILE_REMOVED,
        FILE_UPDATED,
        FOLDER_CREATED,
        NODE_AUTHORIZED,
        NODE_DEAUTHORIZED,
        NODE_DEAUTHORIZED_NO_USER,
        REPO_LINKED)
    @property
    def routes(self):
        # Imported lazily to avoid circular imports at app-registry time.
        from . import routes
        return [routes.api_routes]
    @property
    def user_settings(self):
        return self.get_model('UserSettings')
    @property
    def node_settings(self):
        return self.get_model('NodeSettings')
| UTF-8 | Python | false | false | 4,508 | py | 2,129 | apps.py | 1,647 | 0.638642 | 0.637533 | 0 | 147 | 29.666667 | 108 |
misingnoglic/tumblr-snapr | 13,812,614,834,214 | 3277c38cc6a0d381ef54120e17c8b18e6eea504e | 82a86f0cf1eabe0252523ff960775cd307700b71 | /upload_test.py | 5d9c12985551ab810e6b29826ecdc01e471d8ba4 | []
| no_license | https://github.com/misingnoglic/tumblr-snapr | 4cf87a8d12e856c30f8175739a39bdd2fbc88d47 | 74d5fbb424f7c3287cbaef24a5731277eece1a1b | refs/heads/master | 2020-12-24T13:21:14.948128 | 2015-02-22T21:15:43 | 2015-02-22T21:15:43 | 30,952,589 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #from __future__ import unicode_literals
__author__ = 'arya'
from tumblpy import Tumblpy
# OAuth credentials are kept out of the repo in a local `secrets` module.
from secrets import tumblr_oauth_key , tumblr_secret_key, tumblr_token, tumblr_token_secret
# Authenticated Tumblr API client.
t = Tumblpy( tumblr_oauth_key, tumblr_secret_key, tumblr_token, tumblr_token_secret)
# Fetch the account info and keep the URL of the user's first blog.
blog_url = t.post('user/info')
blog_url = blog_url['user']['blogs'][0]['url']
# NOTE(review): the file handle is never closed; Tumblpy presumably reads it
# during the post below — confirm, or wrap in a with-block.
photo = open('content/image.jpg')
# Upload the photo as a post with a caption and a tag.
post = t.post('post', blog_url=blog_url, params={'type':'photo', 'caption': 'Test Caption', 'data': photo, 'tags':"stuff"})
print post # returns id if posted successfully | UTF-8 | Python | false | false | 555 | py | 5 | upload_test.py | 3 | 0.704505 | 0.702703 | 0 | 16 | 33.75 | 123 |
danbi2990/python_practice | 8,246,337,253,213 | a24972b827e3fd8d7dcab11bbe6b2786c3cfe5f9 | e94363b6dc2d003f19f6c97a1bdc7e47f96aed53 | /tutorial_bodenseo/ch31_iterator_generator/act1_simple_generator.py | 9f6250e632ff74ded153839e4b811d9dd6cd0e94 | []
| no_license | https://github.com/danbi2990/python_practice | c4f74fbeb9002dbcbc2de65b48cacfb161cf7742 | 15ad87740d3aeb45e45886e2a20aeb64b62df1af | refs/heads/master | 2021-01-11T18:12:21.790000 | 2017-02-07T13:19:26 | 2017-02-07T13:19:26 | 79,514,548 | 0 | 1 | null | false | 2017-01-20T02:15:02 | 2017-01-20T01:51:12 | 2017-01-20T01:51:12 | 2017-01-20T02:15:02 | 0 | 0 | 0 | 0 | null | null | null | def city_generator():
yield("London")
yield("Hamburg")
yield("Konstanz")
yield("Amsterdam")
yield("Berlin")
yield("Zurich")
yield("Schaffhausen")
yield("Stuttgart")
# Exhaust the generator: one print per yielded city (eight yields above).
# Fixed: eight duplicated `print(next(city))` statements replaced by a loop.
city = city_generator()
for _ in range(8):
    print(next(city))
| UTF-8 | Python | false | false | 366 | py | 51 | act1_simple_generator.py | 47 | 0.655738 | 0.655738 | 0 | 19 | 18.263158 | 25 |
dshayden/npetracker | 635,655,186,525 | 5793645ef145680d3482423c04049ebccbb67863 | fdb996c7268b84c09d7e38aa293a9641f29d8b54 | /NPETracker.py | f3aee07300ec0a39b5d56df92405b380d6fc4218 | []
| no_license | https://github.com/dshayden/npetracker | 740189ab5ad5f9ecc5f0c9304b5824662d8ad296 | 16ddce154c9df7b07c372075f80dd55fdbb20f00 | refs/heads/master | 2023-03-28T19:59:51.933346 | 2018-08-27T23:42:12 | 2018-08-27T23:42:12 | 346,836,062 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np, argparse
import mixtures
import functools
import du, du.stats
import matplotlib.pyplot as plt
import IPython as ip
def build_opts(nY, nX, K, **kwargs):
  """Assemble the option/hyperparameter namespace for an nY-by-nX image
  sequence with K targets.

  Every field may be overridden via keyword argument; the rest are derived
  (sizes, inverse intrinsics, pixel grid) or prior hyperparameters.
  NOTE(review): np.bool / np.float are removed in modern NumPy; this code
  targets an older NumPy release.
  """
  o = argparse.Namespace()
  # Parameters
  o.nY = nY
  o.nX = nX
  o.N = nY*nX
  o.K = K
  # camK: optional 3x3 camera matrix; its presence switches to 3-d locations.
  o.camK = kwargs.get('camK', None)
  o.camKi = np.linalg.inv(o.camK) if o.camK is not None else None
  o.Dy_loc = 3 if o.camK is not None else 2
  o.Dy_app = 3
  o.Dy = o.Dy_loc + o.Dy_app
  o.Dx = 2 * o.Dy_loc
  zer = np.zeros((o.Dy_loc, o.Dy_loc))
  eye = np.eye(o.Dy_loc)
  # Linear dynamics (F) and observation (H) matrices in block form.
  o.F = kwargs.get('F', np.block([[eye, eye], [zer, eye]]))
  o.H = kwargs.get('H', np.block([eye, zer]))
  o.lab = kwargs.get('lab', True)
  o.fgMask = kwargs.get('fgMask', np.ones((o.nY, o.nX), dtype=np.bool))
  o.fgMask = o.fgMask.flatten()
  o.nPartSamples = kwargs.get('nPartSamples', 100)
  # nPostSamples
  o.maxBreaks = kwargs.get('maxBreaks', 5)
  o.drawColors = kwargs.get('drawColors', du.diffcolors(K))
  # Convenience
  yy, xx = np.meshgrid(range(o.nY), range(o.nX), indexing='ij')
  o._xf, o._yf = (xx.flatten(), yy.flatten())
  o._xy = np.stack((o._xf, o._yf), axis=1).astype(np.float)
  # Priors
  ## parts
  o.Gamma = kwargs.get('Gamma', np.tile(np.eye(o.Dy_loc), (K, 1, 1)))
  o.v = kwargs.get('v', 2000)
  defaultS_scale = 50 if o.Dy_loc == 3 else 20
  o.S = kwargs.get('S', defaultS_scale*(o.v - o.Dy_loc - 1) * np.eye(o.Dy_loc))
  o.alpha = kwargs.get('alpha', 0.1)
  o.beta = kwargs.get('beta', np.array([0.98, 1, 1]))
  o.x0 = kwargs.get('x0', np.zeros((o.K, o.Dx)))
  o.P0 = kwargs.get('P0', np.tile(1e6*np.eye(o.Dx), [o.K, 1, 1]))
  # todo: integrate these when I add inference for them
  o.Lambda = kwargs.get('Lambda',
    np.tile(np.block([[1e-4*eye, zer], [zer, 20*eye]]), [K, 1, 1]))
  # [m_{nB}]_{n=1}^N, kappa_B, S_B, v_B
  # gamma
  # m_A, kappa_A, S_A, v_A
  return o
class BGModel:
  """ Observation background model. """
  def __init__(self, mu_B, Sigma_B, m):
    """ Per-observation background model.
    INPUT
      mu_B (ndarray, [N, 3+dy]): mean background
      Sigma_B (ndarray, [N, 3+dy]): (diagonal) background variances
      m (ndarray, [N,]): boolean mask indicating missing or non-missing bg model
    """
    self.mu_B = mu_B
    self.Sigma_B = Sigma_B
    self.m = m
  def logpdf(self, y):
    """ Calculate per-observation background log-likelihood.
    INPUT
      y (ndarray, [N, 3+dy]): observations
    OUTPUT
      ll (ndarray, [N,]): log-likelihood of background
    """
    # Diagonal-covariance Gaussian evaluated per row; note the stored mask
    # self.m is NOT applied here — callers filter with it themselves.
    return du.stats.logmvnpdfdiag(y, self.mu_B, self.Sigma_B)
class AppModel:
  """ Target appearance model. """
  def __init__(self, pi, mu_A, Sigma_A):
    """ Target appearance model.
    INPUT
      pi (ndarray, [C,]): appearance mixture coefficients
      mu_A (ndarray, [C, 3]): mean appearances
      Sigma_A (ndarray, [C, 3, 3]): appearance covariances
    """
    self.C = len(pi)
    self.pi = pi
    self.mu_A = mu_A
    self.Sigma_A = Sigma_A
  def logpdf(self, yA):
    """ Calculate per-observation appearance log-likelihood.
    INPUT
      yA (ndarray, [N, 3]): appearance observations
    OUTPUT
      ll (ndarray, [N,]): log-likelihood of appearance
    """
    # Pair up each component's mean/covariance for the marginalized GMM call.
    theta = [ (self.mu_A[c], self.Sigma_A[c]) for c in range(self.C) ]
    return mixtures.gmm_logpdf_marginalized(yA, theta, self.pi)
def build_sample(o, x, Pf, mu, Sigma, Pi, pi, ll, **kwargs):
  """Bundle one posterior sample into an argparse.Namespace.

  The optional keyword arguments `delta` and `z` (association labels) are
  attached only when BOTH are supplied; otherwise neither is set.
  """
  # todo: check everything is expected format/type
  sample = argparse.Namespace(x=x, Pf=Pf, mu=mu, Sigma=Sigma, Pi=Pi,
    pi=pi, ll=ll)
  delta, z = kwargs.get('delta', None), kwargs.get('z', None)
  if delta is not None and z is not None:
    sample.delta, sample.z = delta, z
  return sample
# def build_outer_sample(**kwargs):
# None
# # x_{kL}^t
#
# ## app
# # pi_{kA}
# # mu_{kcA}, Sigma_{kcA}
#
# ## parts
# # theta_{kp}^t
# # pi_k^t
# # Pi^t
#
# ## background
# # mu_{nB}, Sigma_{nB}
#
# ## associations, store or not???
# # delta_n^t
# # z_n^t
def LoadObs(o, rgb, depth=None):
  """ Load registered (rgb, depth) image pair as observation.
  INPUT
    o (opts)
    rgb (str or ndarray): path name or already-loaded image
    depth (str or ndarray): path name or already-loaded image
  OUTPUT
    y (ndarray, [N, dy+3]): observations
    m (ndarray, [N,]): boolean mask, False at any missing observation
  """
  # NOTE(review): np.float / np.bool are removed in modern NumPy.
  if type(rgb) == str: rgb = du.imread(rgb)
  assert rgb.shape[0] == o.nY and rgb.shape[1] == o.nX, 'Invalid image size.'
  # Optionally convert appearance to CIELAB space.
  if o.lab: img = du.rgb2labImg(rgb).astype(np.float)
  else: img = rgb.astype(np.float)
  m = np.ones(o.N, dtype=np.bool)
  if depth is not None and o.camK is not None:
    if type(depth) == str: depth = du.imread(depth)
    # any
    # Scale pixel coordinates by depth and apply the inverse camera matrix
    # to obtain 3-d points; zero-depth pixels are marked missing in m.
    xyZ = np.concatenate((o._xy,
      depth.astype(np.float).flatten()[:,np.newaxis]), axis=1)
    xyZ[:,:2] *= xyZ[:,2][:,np.newaxis]
    pts = xyZ.dot(o.camKi)
    m = depth.flatten() > 0
  else:
    # 2-d case: locations are just the pixel coordinates.
    pts = o._xy
  imgF = img.astype(np.float)
  imgF.shape = (o.N, 3)
  # Each row: [location (2 or 3 dims), appearance (3 dims)].
  y = np.concatenate((pts, imgF), axis=1)
  return y, m
def SampleParts_t(o, x, bg, app, img, depth=None):
  """ Infer parts for a given time t.
  INPUT
    o (opts)
    img (str or ndarray): path name or already-loaded image
    depth (str or ndarray): path name or already-loaded image
    x (ndarray, [K, Dx]): sampled target locations
    bg (bgModel): background model
    app (length-K list of appModel): target appearance models
  OUTPUT
    (z, delta, pi, Pi, mu, Sigma, ll): last joint sample of part labels,
    target associations, part weights, global weights, part means/covs and
    log-likelihood.
  """
  y, m = LoadObs(o, img, depth)
  # handle fgMask and missing data in observations + bgModel
  useIdx = np.logical_and(m, np.logical_and(bg.m, o.fgMask))
  yUse = y[useIdx, :o.Dy_loc]
  # evaluate bg and appearance likelihoods
  bgLL = bg.logpdf(y)[useIdx]
  appLL = np.stack(
    [app[k].logpdf(y[useIdx,o.Dy_loc:]) for k in range(o.K)])
  # Project each target's state through the observation matrix.
  Hx = np.stack( [o.H.dot(x[k]) for k in range(o.K) ] )
  fit_kwargs = dict(
    nSamples=o.nPartSamples, altLL=bgLL, sideLL=appLL,
    alpha=o.alpha, v=o.v, S=o.S, Gamma=o.Gamma, x=Hx,
    maxBreaks=o.maxBreaks,
    zInitStrategy='random')
  res = mixtures.fit_extents_model3(yUse, o.K, **fit_kwargs)
  z, delta, pi, Pi, mu, Sigma, ll = res
  # Scatter the labels back to the full pixel grid; 255 marks pixels that
  # were excluded by the masks above.
  _z = 255*np.ones(o.N, dtype=np.uint8)
  _z[useIdx] = z[-1]
  _delta = 255*np.ones(o.N, dtype=np.uint8)
  _delta[useIdx] = delta[-1]
  _z, _pi, _mu, _Sigma = CollectUsedParts(
    o, _z, _delta, pi[-1], mu[-1], Sigma[-1])
  # return last joint sample
  return _z, _delta, _pi, Pi[-1], _mu, _Sigma, ll[-1]
def SampleLocs(o, delta, z, mu, Sigma, x0, P0):
  """ Sample from latent posterior of linear dynamical system.
  Return sample from x_{1:T} ~ p(X | Delta, Z, Mu, Sigma, x0, P0) where
    X = x_{1:T}
    Delta = delta_{1:N}^{1:T}
    Z = z_{1:K,1:N}^{1:T}
    Mu = mu_{1:K,1:P_k}^{1:T}
    Sigma = Sigma_{1:K,1:P_k}^{1:T}
  INPUT
    o (opts):
    delta ( [delta^1, ..., delta^T] ): Target associations for time t
      delta^t (ndarray, [o.N,])
    z ( [z^1, ..., z^T] ): Part associations for time t
      z^t (ndarray, [o.N,])
    mu ( [mu^1, ..., mu^T] ): Target part means for time t
      mu^t ( [mu_1^t, ..., mu_K^t] )
        mu_k^t (ndarray, [nParts_k, o.Dy_loc])
    Sigma ( [Sigma^1, ..., Sigma^T] ): Target part covariances for time t
      Sigma^t ( [Sigma_1^t, ..., Sigma_K^t] )
        Sigma_k^t (ndarray, [nParts_k, o.Dy_loc, o.Dy_loc])
    x0 (ndarray, [o.K, o.Dx,]): Target prior locations at time t = -1
    P0 (ndarray, [o.K, o.Dx, o.Dx]): Target prior location covariance
      at time t = -1
  OUTPUT
    x (ndarray, [T, o.K, o.Dx]): Joint sample of target locations
    Pf (ndarray, [T, o.K, o.Dx, o.Dx]): Marginal target location covariances
      (Pf[t,k] is NOT the covariance from the posterior draw of x[t,k] because
      given x[t+1,k], this is just a function of the dynamics. Instead, it is
      the marginal covariance, which more clearly conveys are uncertainty
      about this portion of the joint sample.)
    ll (ndarray, [T,K]): log-likelihood of the joint sample, x
  """
  T = len(delta)
  x = np.zeros((T, o.K, o.Dx))
  Pf = np.zeros((T, o.K, o.Dx, o.Dx))
  ll = np.zeros((T, o.K))
  # Regroup the per-time lists so that mu_k[k][t] indexes target k at time t.
  mu_k = list(zip(*mu))
  Sigma_k = list(zip(*Sigma))
  for k in range(o.K):
    # Independent LDS sample per target.
    x[:,k], _, _, _, Pf[:,k], ll[:,k] = mixtures.conjugate.sample_lds(
      mu_k[k], Sigma_k[k], o.Lambda[k], o.F, o.H, x0[k], P0[k])
  return x, Pf, ll
def CollectUsedParts(o, z, delta, pi, mu, Sigma):
  """ Remove truncated / non-associated parts.
  INPUT
    o (opts):
    z (ndarray, [o.N,]):
    delta (ndarray, [o.N,]):
    pi (ndarray, [o.K, o.maxBreaks,]):
    mu (ndarray, [o.K, o.maxBreaks, o.Dy_loc]):
    Sigma (ndarray, [o.K, o.maxBreaks, o.Dy_loc, o.Dy_loc])
  OUTPUT
    pi (ndarray, [ pi_1, pi_2, ..., pi_k ]
      pi_k (ndarray, [nParts_k]): mixture weights for target k
    mu (ndarray, [ mu_1, mu_2, ..., mu_k ]
      mu_k (ndarray, [nParts_k, o.Dy_loc])
    Sigma (ndarray, [ Sigma_1, Sigma_2, ..., Sigma_k ]
      Sigma_k (ndarray, [nParts_k, o.Dy_loc, o.Dy_loc])
  NOTE: z is relabeled in place (and also returned).
  """
  _pi = [ [] for k in range(o.K) ]
  _mu = [ [] for k in range(o.K) ]
  _Sigma = [ [] for k in range(o.K) ]
  for k in range(o.K):
    # Pixels associated to target k (delta uses 1-based target indices).
    delta_k = delta==k+1
    usedPartsIdx = np.unique( z[delta_k] ).astype(np.int)
    nUsedParts = len(usedPartsIdx)
    # Relabel the surviving parts to a dense 0..nUsedParts-1 range.
    z[delta_k] = du.changem(z[delta_k], usedPartsIdx, range(nUsedParts))
    _pi[k] = pi[k, usedPartsIdx]
    # Renormalize the remaining mixture weights.
    _pi[k] = _pi[k] / np.sum(_pi[k])
    _mu[k] = mu[k, usedPartsIdx]
    _Sigma[k] = Sigma[k, usedPartsIdx]
  return z, _pi, _mu, _Sigma
def ShowSample(o, rgb, delta, z, mu, Sigma, x=None):
  """Plot a sample on the current matplotlib axes: pixel associations are
  alpha-blended onto rgb, part covariances are drawn as ellipses, and
  (optionally) target locations x as filled dots. 2-d only.
  """
  if o.Dy_loc == 3: assert False, '3d visualization not ported yet.'
  if type(rgb) == str: rgb = du.imread(rgb)
  colors = du.diffcolors(o.K)
  for k in range(o.K):
    for p in range(len(mu[k])):
      # Pixels assigned to target k AND part p (delta is 1-based).
      deltak_zp = np.logical_and(delta==k+1, z==p)
      nAssociated = np.sum(deltak_zp)
      if nAssociated == 0: continue
      plt.plot(*du.stats.Gauss2DPoints(mu[k][p], Sigma[k][p]), c=colors[k])
      # Blend the target color (alpha 0.3) over the associated pixels.
      rgb = du.DrawOnImage(rgb, np.nonzero(du.asShape(deltak_zp, (o.nY, o.nX))),
        np.concatenate((colors[k], np.array([0.3,]))))
    if x is not None:
      plt.scatter(x[k,0], x[k,1], s=30, c=colors[k])
  plt.imshow(rgb)
  plt.ylim(0, o.nY)
  plt.xlim(0, o.nX)
  plt.gca().invert_yaxis()
def DrawSthSample(sampleDir, outDir, idx=-1):
  """Render the idx-th stored sample from sampleDir (default: the latest)
  as per-frame images in outDir."""
  opts = du.load('%s/opts' % sampleDir)
  images = du.load('%s/imgs' % sampleDir)
  samplePaths = du.GetFilePaths(sampleDir, 'samples.*gz')
  DrawSaveAllSamples(opts, outDir, images, du.load(samplePaths[idx]))
def DrawSaveAllSamples(o, outDir, imgs, sample):
  """ Draw all timesteps of sample (from build_sample).
  INPUT
    o (opts)
    outDir (str): location of output images
    imgs (list of strs): input images
    sample (argparse.Namespace): sample over time
  """
  T = len(imgs)
  # One zero-padded output path per frame.
  outs = ['%s/img-%08d.jpg' % (outDir, t) for t in range(T)]
  pFunc = functools.partial(DrawSaveSample, o)
  args = list(zip(outs, imgs, sample.delta, sample.z, sample.mu,
    sample.Sigma))
  # du.ParforD presumably maps pFunc over args in parallel — TODO confirm.
  du.ParforD(pFunc, args)
def DrawSaveSample(o, saveName, rgb, delta, z, mu, Sigma, x=None, dpi=300):
  """Render one frame of a sample to saveName: pixel associations blended
  onto rgb, part covariance ellipses, and optional target locations. 2-d only.

  Fixed: the figure is now closed after saving — the original leaked one
  open matplotlib figure per call, which accumulates when rendering every
  frame of a sequence.
  """
  if o.Dy_loc == 3: assert False, '3d visualization not ported yet.'
  if type(rgb) == str: rgb = du.imread(rgb)
  fig, ax = plt.subplots()
  colors = du.diffcolors(o.K)
  for k in range(o.K):
    for p in range(len(mu[k])):
      # Pixels assigned to target k AND part p (delta is 1-based).
      deltak_zp = np.logical_and(delta==k+1, z==p)
      nAssociated = np.sum(deltak_zp)
      if nAssociated == 0: continue
      ax.plot(*du.stats.Gauss2DPoints(mu[k][p], Sigma[k][p]), c=colors[k])
      rgb = du.DrawOnImage(rgb, np.nonzero(du.asShape(deltak_zp, (o.nY, o.nX))),
        np.concatenate((colors[k], np.array([0.3,]))))
    if x is not None:
      ax.scatter(x[k,0], x[k,1], s=30, c=colors[k])
  ax.imshow(rgb)
  ax.set_ylim(0, o.nY)
  ax.set_xlim(0, o.nX)
  ax.set_xticks([])
  ax.set_yticks([])
  ax.invert_yaxis()
  fig.savefig(saveName, dpi=dpi, bbox_inches='tight')
  plt.close(fig)  # release the figure; callers never use it after saving
| UTF-8 | Python | false | false | 11,822 | py | 15 | NPETracker.py | 14 | 0.595077 | 0.582812 | 0 | 390 | 29.312821 | 80 |
kanatatsu64/wordlist | 8,169,027,797,840 | 82de8522cf4d9d95f08a204b8e1e481fc022b0ae | f40a3a7aba33fe499f450a640563495bc29d2745 | /scripts/test-helper.py | 29cb07570dba21f3aefba1256d06425906f16e39 | []
| no_license | https://github.com/kanatatsu64/wordlist | d26c3ddc807adb632f368f3d72408cbdd6869b0a | bc54e87f3d888a9e9c7d5ee2540beae8547abd56 | refs/heads/master | 2023-02-03T22:39:16.892731 | 2020-12-13T17:28:18 | 2020-12-13T17:28:18 | 315,680,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import unittest
import yaml
import re
from helper import validate, convert, getScripts, getAllScripts, notice
class TestValidate(unittest.TestCase):
    """Tests for helper.validate: a config entry is accepted only when it has
    exactly a /slash-delimited/ 'test' key and a 'script' key.

    Refactored: the nine near-identical methods now share _validate(),
    which joins the YAML lines, parses them, and runs validate().
    """

    def _validate(self, *lines):
        """Parse the YAML document given line-by-line and run validate() on it."""
        return validate(yaml.load('\n'.join(lines)))

    def testValidate1(self):
        # minimal valid config: a single well-formed entry
        self.assertEqual(True, self._validate(
            "{",
            "  example: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate2(self):
        # two well-formed entries
        self.assertEqual(True, self._validate(
            "{",
            "  example1: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate3(self):
        # misspelled 'tests' key invalidates the whole config
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    tests: /^src//,",
            "    script: stack test,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate5(self):
        # the 'test' value must be wrapped in /slashes/
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    test: ^src/,",
            "    script: stack test,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate6(self):
        # misspelled 'scripts' key
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    test: /^src//,",
            "    scripts: stack test,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate7(self):
        # missing 'test' key
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    script: stack test,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate8(self):
        # missing 'script' key
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    test: /^src//,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))

    def testValidate9(self):
        # an unexpected extra key is rejected
        self.assertEqual(False, self._validate(
            "{",
            "  example1: {",
            "    test: /^src//,",
            "    script: stack test,",
            "    other: additional,",
            "  },",
            "  example2: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"))
class TestConvert(unittest.TestCase):
    """Tests for helper.convert: turn the parsed YAML mapping into a list of
    {name, test (compiled regex), script} records."""
    def testConvert1(self):
        yamlStr = '\n'.join([
            "{",
            "  example1: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"
        ])
        configRaw = yaml.load(yamlStr)
        config = convert(configRaw)
        actual = len(config)
        expected = 1
        self.assertEqual(expected, actual)
        expected = ['name', 'test', 'script']
        actual = config[0].keys()
        self.assertCountEqual(expected, actual)
        expected = 'example1'
        actual = config[0]['name']
        self.assertEqual(expected, actual)
        # NOTE: compiled patterns have no __eq__; this comparison relies on
        # re's internal pattern cache returning the identical object.
        expected = re.compile('^src/')
        actual = config[0]['test']
        self.assertEqual(expected, actual)
        expected = 'stack test'
        actual = config[0]['script']
        self.assertEqual(expected, actual)
    def testConvert2(self):
        yamlStr = '\n'.join([
            "{",
            "  example1: {",
            "    test: /^src1//,",
            "    script: stack test 1,",
            "  },",
            "  example2: {",
            "    test: /^src2//,",
            "    script: stack test 2,",
            "  }",
            "}"
        ])
        configRaw = yaml.load(yamlStr)
        config = convert(configRaw)
        actual = len(config)
        expected = 2
        self.assertEqual(expected, actual)
        for i, rec in enumerate(config):
            expected = ['name', 'test', 'script']
            actual = rec.keys()
            self.assertCountEqual(expected, actual)
            expected = "example"+str(i+1)
            actual = rec['name']
            # Fixed: the original used assertCountEqual on two strings, which
            # only checks that they are anagrams; names must match exactly.
            self.assertEqual(expected, actual)
            expected = re.compile("^src"+str(i+1)+"/")
            actual = rec['test']
            self.assertEqual(expected, actual)
            expected = "stack test "+str(i+1)
            actual = rec['script']
            self.assertEqual(expected, actual)
class TestGetScripts(unittest.TestCase):
    """Tests for helper.getScripts: return notice(name)+script for every
    config entry whose /regex/ matches at least one changed file.

    Refactored: the seven near-identical methods now share _scripts(), which
    builds the config from YAML lines and runs getScripts over the files.
    """

    def _scripts(self, yaml_lines, files):
        """Return (config, getScripts(config, files)) for the given YAML lines."""
        config = convert(yaml.load('\n'.join(yaml_lines)))
        return config, getScripts(config, files)

    def testGetScripts1(self):
        # unanchored pattern matches a path component in the middle
        config, actual = self._scripts([
            "{",
            "  example: {",
            "    test: /src/,",
            "    script: stack test,",
            "  }",
            "}"
        ], ['base/src/path.hs'])
        expected = [notice(config[0]['name']) + 'stack test']
        self.assertCountEqual(expected, actual)

    def testGetScripts2(self):
        # pattern with a trailing slash still matches inside the path
        config, actual = self._scripts([
            "{",
            "  example: {",
            "    test: /src//,",
            "    script: stack test,",
            "  }",
            "}"
        ], ['base/src/path.hs'])
        expected = [notice(config[0]['name']) + 'stack test']
        self.assertCountEqual(expected, actual)

    def testGetScripts3(self):
        # anchored ^src/ must not match a file under a prefix directory
        config, actual = self._scripts([
            "{",
            "  example: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"
        ], ['base/src/path.hs'])
        expected = []
        self.assertCountEqual(expected, actual)

    def testGetScripts4(self):
        # anchored ^src/ matches a file at the repository root
        config, actual = self._scripts([
            "{",
            "  example: {",
            "    test: /^src//,",
            "    script: stack test,",
            "  }",
            "}"
        ], ['src/path.hs'])
        expected = [notice(config[0]['name']) + 'stack test']
        self.assertCountEqual(expected, actual)

    def testGetScripts5(self):
        # src$ anchors at the end of the path and must not match
        config, actual = self._scripts([
            "{",
            "  example: {",
            "    test: /src$/,",
            "    script: stack test,",
            "  }",
            "}"
        ], ['base/src/path.hs'])
        expected = []
        self.assertCountEqual(expected, actual)

    def testGetScripts6(self):
        # two entries, each triggered by its own changed file
        config, actual = self._scripts([
            "{",
            "  example1: {",
            "    test: /src1//,",
            "    script: stack test 1,",
            "  },",
            "  example2: {",
            "    test: /src2//,",
            "    script: stack test 2,",
            "  }",
            "}"
        ], ['base/src1/path.hs', 'base/src2/path.hs'])
        expected = [
            notice(config[0]['name']) + 'stack test 1',
            notice(config[1]['name']) + 'stack test 2'
        ]
        self.assertCountEqual(expected, actual)

    def testGetScripts7(self):
        # several files matching the same entry yield its script only once
        config, actual = self._scripts([
            "{",
            "  example1: {",
            "    test: //src1//,",
            "    script: stack test 1,",
            "  },",
            "  example2: {",
            "    test: //src2//,",
            "    script: stack test 2,",
            "  }",
            "}"
        ], ['base/src1/path1.hs', 'base/src1/path2.hs'])
        expected = [
            notice(config[0]['name']) + 'stack test 1'
        ]
        self.assertCountEqual(expected, actual)
class TestGetAllScripts(unittest.TestCase):
    """Tests for helper.getAllScripts: every entry's script is returned
    unconditionally, with duplicate scripts merged under combined notices."""
    def testGetAllScripts1(self):
        # distinct scripts: one result per entry
        yamlStr = '\n'.join([
            "{",
            "  example1: {",
            "    test: //src1//,",
            "    script: stack test 1,",
            "  },",
            "  example2: {",
            "    test: //src2//,",
            "    script: stack test 2,",
            "  }",
            "}"
        ])
        configRaw = yaml.load(yamlStr)
        config = convert(configRaw)
        name1 = config[0]['name']
        name2 = config[1]['name']
        expected = [
            notice(name1) + 'stack test 1',
            notice(name2) + 'stack test 2'
        ]
        actual = getAllScripts(config)
        self.assertCountEqual(expected, actual)
    def testGetAllScripts2(self):
        # identical scripts are merged: their notices (sorted by name) are
        # concatenated in front of the single shared script
        yamlStr = '\n'.join([
            "{",
            "  example1: {",
            "    test: //src1//,",
            "    script: stack test 1,",
            "  },",
            "  example2: {",
            "    test: //src2//,",
            "    script: stack test 1,",
            "  }",
            "}"
        ])
        configRaw = yaml.load(yamlStr)
        config = convert(configRaw)
        name1 = config[0]['name']
        name2 = config[1]['name']
        names = sorted([name1, name2])
        notices = ''.join(map(notice, names))
        expected = [
            notices + 'stack test 1'
        ]
        actual = getAllScripts(config)
        self.assertCountEqual(expected, actual)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| UTF-8 | Python | false | false | 11,475 | py | 146 | test-helper.py | 16 | 0.424488 | 0.414815 | 0 | 423 | 26.12766 | 71 |
pelkmanslab/brainy | 16,054,587,763,893 | 9c2ff865f83dee6af0a2ff58afcbaf41dd78f61a | 35e70af99db6ad018a7f861d597a7d108ca9d149 | /src/brainy/log.py | 7ae6d181f1b400943de78ef79dc7a75ab5c9fc95 | [
"MIT"
]
| permissive | https://github.com/pelkmanslab/brainy | 205cc350dcbece723b2f54b68f3d5fdf687f0869 | eb088be9585734c66be5d6ef0b7127a923f4066c | refs/heads/master | 2021-03-27T12:29:28.584197 | 2015-04-16T09:17:58 | 2015-04-16T09:17:58 | 23,793,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import atexit
from tree_output.houtput import HierarchicalOutput
from tree_output.log_handler import HierarchicalOutputHandler
LOGGING_OPTIONS = ('silent', 'console', 'json')
def make_console_handler(level):
console = logging.StreamHandler()
console.setLevel(level)
return console
def make_hierarchical_handler(format):
houtput = HierarchicalOutput.factory(format='json')
return HierarchicalOutputHandler(houtput=houtput)
# We always need a json handler for project reports.
json_handler = make_hierarchical_handler('json')
json_handler.setLevel(logging.INFO)
def output_json():
print str(json_handler.houtput)
def setup_logging(option, level=logging.INFO):
if option not in LOGGING_OPTIONS:
raise Exception('Unknown logging option: %s' % option)
logging.root.setLevel(level)
logging.root.addHandler(json_handler)
# 'silent handler is ignored'
if option == 'console':
console_handler = make_console_handler(level)
format_str = '%(asctime)s %(name)-30s %(levelname)-8s %(message)s'
datefmt_str = '%m-%d %H:%M'
console_handler.setFormatter(
logging.Formatter(format_str, datefmt_str))
logging.root.addHandler(console_handler)
elif option == 'json':
atexit.register(output_json)
| UTF-8 | Python | false | false | 1,319 | py | 49 | log.py | 41 | 0.70887 | 0.706596 | 0 | 44 | 28.977273 | 74 |
SanjaySathiraju/Voting-system-smart-ID-Face-recognition- | 5,454,608,481,047 | 5daccfd97cd298bf0891234e64b4218966ba0e6d | f8d404fa718d4c25157721da802c3fe3ef7cf0fb | /Source code/feature_vec.py | 74e1d2e222488997820c907b256fa00a80ac092a | []
| no_license | https://github.com/SanjaySathiraju/Voting-system-smart-ID-Face-recognition- | 33b2fa72d0919952c9ff56e0e3831e350d785852 | 23552e180b3788d17629a3cb55e99a35439168e1 | refs/heads/main | 2023-04-13T20:18:28.490738 | 2021-04-15T08:30:26 | 2021-04-15T08:30:26 | 358,161,555 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rectangulate import *
from numpy import zeros
from sys import argv
def engroup(fname):
'''
Generate the feature vector correspoonding to an iris
by using the rectangular iris image generated from
rectangulate.py;
From the normalized eye image obtained in the rectangle function
of rectangulate.py, the algorithm proposed by Jong Gook Ko et. al.
is used to obtain a set of feature vectors. These feature vectors
vary slightly among different images of the same eye, but show
wide variation among different eyes.
Parameters
----------
fname: string
The name of the image in string format.
Returns
-------
[Horizontal groups, Vertical groups]
Horizontal groups: A list of sequences of feature vectors
evaluated along the rows
Vertical groups: A list of sequences of feature vectors
evaluated along the columns.
'''
strip = rectangle(fname)
disp(strip)
grid = zeros([13, 36])
for i in range(13):
for j in range(36):
block = strip[3*i:3*i+3, 10*j:10*j+10]
for row in block:
grid[i, j] += sum(row)
# Group encoding
def encode(group):
avg = sum(group) / 5
group -= avg
for i in range(1, 5):
group[i] = sum(group[: i+1])
code = ''
argmax = 0
argmin = 0
for i in range(5):
if group[i] == max(group):
argmax = i
if group[i] == min(group):
argmin = i
for i in range(5):
if i < argmax and i < argmin:
code += '0'
if i > argmax and i > argmin:
code += '0'
if i >= argmax and i <= argmin:
code += '2'
if i <= argmax and i >= argmin:
code += '1'
return code
# Horizontal grouping
horgroups = []
for row in range(13):
horgroups.append([])
for col in range(32):
group = zeros(5)
for i in range(5):
group[i] = grid[row, col+i]
horgroups[row].append(encode(group))
# Vertical grouping
vergroups = []
for col in range(36):
vergroups.append([])
for row in range(9):
group = zeros(5)
for i in range(5):
group[i] = grid[row+i, col]
vergroups[col].append(encode(group))
return [horgroups, vergroups]
| UTF-8 | Python | false | false | 2,482 | py | 19 | feature_vec.py | 17 | 0.537873 | 0.521757 | 0 | 85 | 28.188235 | 70 |
lucasbiscaro/smmt_plugin | 13,288,628,823,295 | 8eb152b8cd97c490494d15de4e8cdf27d0089ba2 | a894b3d350e0f69dea5c36999d6b4a33286498f8 | /pluginbuilder/plugin_templates/toolbutton_with_dialog/plugin_template.py | 78d8cd523f74699b28a2ccef86a746ffc56f1dbb | []
| no_license | https://github.com/lucasbiscaro/smmt_plugin | acfa6dd31bcae4bd33f02037c1906faf3c2a1b5d | 6a93eea2f314820ed5ba17cc64bb1aff4514d528 | refs/heads/master | 2020-03-17T02:31:05.177143 | 2018-07-06T12:25:08 | 2018-07-06T12:25:08 | 133,193,283 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
PluginTemplate
A QGIS plugin
plugin_template
-------------------
begin : 2015-03-17
git sha : $Format:%H$
copyright : (C) 2015 by Pirmin Kalberer
email : pka@sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from ..plugin_template import PluginTemplate
class ToolbuttonWithDialogPluginTemplate(PluginTemplate):
def descr(self):
return "Tool button with dialog"
def subdir(self):
return os.path.dirname(__file__)
def template_map(self, specification, dialog):
menu_text = dialog.template_subframe.menu_text.text()
menu = dialog.template_subframe.menu_location.currentText()
# Munge the plugin menu function based on user choice
if menu == 'Plugins':
add_method = 'addPluginToMenu'
remove_method = 'removePluginMenu'
else:
add_method = 'addPluginTo{}Menu'.format(menu)
remove_method = 'removePlugin{}Menu'.format(menu)
self.category = menu
return {
# Makefile
'TemplatePyFiles': '%s_dialog.py' % specification.module_name,
'TemplateUiFiles': '%s_dialog_base.ui' % specification.module_name,
'TemplateExtraFiles': 'icon.png',
'TemplateQrcFiles': 'resources.qrc',
'TemplateRcFiles': "resources.py",
# Menu
'TemplateMenuText': menu_text,
'TemplateMenuAddMethod': add_method,
'TemplateMenuRemoveMethod': remove_method,
}
def template_files(self, specification):
result = {
'module_name_dialog.tmpl':
'%s_dialog.py' % specification.module_name,
'module_name_dialog_base.ui.tmpl':
'%s_dialog_base.ui' % specification.module_name,
'resources.tmpl': 'resources.qrc',
}
if specification.gen_tests:
result.update({
'test/test_module_name_dialog.templ':
'test/test_%s_dialog.py' % specification.module_name,
'test/test_resources.templ': 'test/test_resources.py'
})
return result
def copy_files(self, specification):
return {
'icon.png': 'icon.png'
}
| UTF-8 | Python | false | false | 3,135 | py | 20 | plugin_template.py | 12 | 0.474322 | 0.469856 | 0 | 79 | 38.683544 | 79 |
btree1970/CountrySentiment | 9,268,539,472,665 | 7669028b5d424e5b769fb3e48320a30f5267b6b4 | b8146f9ca0ddfaea6e8de48a13b19623b088fd6f | /app.py | ce8c7e0c4677de489c1595cb35ef39e2f428e72e | []
| no_license | https://github.com/btree1970/CountrySentiment | f7e87fe9052e741c964e63185844baa03af903be | f8349653cebace67013d480446a46835179c68ae | refs/heads/master | 2022-12-16T07:32:28.537449 | 2020-01-04T00:55:14 | 2020-01-04T00:55:14 | 229,656,580 | 1 | 0 | null | false | 2022-12-11T18:32:00 | 2019-12-23T01:43:38 | 2020-07-22T13:33:20 | 2022-12-11T18:32:00 | 1,240 | 1 | 0 | 25 | Python | false | false | import tweepy
import json
import logging
import sys
import os
import re
from nlp import getSenimentScoreForTopic
from storageUtils import Storage
from dotenv import load_dotenv
load_dotenv()
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
_LOGGER.addHandler(consoleHandler)
CONSUMER_KEY = os.getenv("CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET")
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
ACCESS_TOKEN_SECRET = os.getenv("ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# _LOGGER.info('fdksjfksdlfj')
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
with open('data/stateAbbrev.json', "r") as states:
stateID = json.load(states)
# index = 0
# for state in stateID:
# if index > 45:
# geostates = api.geo_search(query=state, granularity="city")
# for geostate in geostates:
# if geostate.name == stateID[state]:
# print(geostate.name, geostate.id)
# index += 1
with open('data/stateID.json', "r") as states:
stateCodes = json.load(states)
topicsMap = json.load(open('data/topics.json', 'r'))
emoticons_happy = set([
':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',
':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',
'=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',
'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',
'<3'
])
emoticons_sad = set([
':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',
':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c',
':c', ':{', '>:\\', ';('
])
def preprocess(tweet):
# remove urls and mentions
return ' '.join(re.sub("(@[A-Za-z0-9]+)|(\w+:\/\/\S+)", " ", tweet).split())
emotics = emoticons_happy.union(emoticons_sad)
def getTweetsForTopic(topic):
tweetsByState = {}
for state in stateCodes:
tweetsByState[state] = []
max_queries = 6
for state in stateCodes:
stateCode = stateCodes[state]
tweets = tweet_batch = api.search(q="place:{} {} lang:en ".format(stateCode,
topicsMap[topic]), result_type="mixed", count=100)
count = 1
#strange twitter search behaviour
#even though search doesn't exceed count there are more results on subsequent search
while count < max_queries:
tweet_batch = api.search(q="place:{} {} lang:en ".format(stateCode, topicsMap[topic]),
result_type="mixed",
count=100,
max_id=tweet_batch.max_id)
tweets.extend(tweet_batch)
count += 1
for tweet in tweets:
if len(tweet.text) > 10:
tweetsByState[state].append(
{
'text': preprocess(tweet.text),
'retweet_count': tweet.retweet_count,
'favourite_count': tweet.favorite_count
})
_LOGGER.info('state {} has loaded {} for topic: {}'.format(state, len(tweets), topicsMap[topic]))
fileName = 'data/twitterData/tweetState_{}.txt'.format(topic)
try:
Storage.upload(json.dumps(tweetsByState, ensure_ascii=False, indent=4), fileName)
_LOGGER.info('tweets for {} have been successfully loaded.'.format(topicsMap[topic]))
except IOError:
_LOGGER.error('unable to successfuly load files')
return tweetsByState
def loadAllTweetsandGetScores():
#loadAll Tweets
topicvals = topicsMap.keys()
if topicvals:
for topic in topicvals:
getTweetsForTopic(topic)
getSenimentScoreForTopic(topic)
else:
raise ImportError
| UTF-8 | Python | false | false | 4,349 | py | 16 | app.py | 7 | 0.534376 | 0.528627 | 0 | 134 | 31.425373 | 107 |
kegplant/Django_Intro_sessionWords | 2,645,699,883,046 | f359bbeabfe1487d061946a239161d09b2c48982 | 55a08a062e7bdc5971c0cb04ab280d39ac5fa68e | /apps/session_words/views.py | 36dab02bf5768d26b2d7d9cb6c4d9314aef7f9d3 | []
| no_license | https://github.com/kegplant/Django_Intro_sessionWords | a764092953b897bb00c2f3717343ecf252e3002d | a9c94c1bb68c58d7645c6cd7c3a5fd0a14addc1f | refs/heads/master | 2021-08-14T06:57:14.869869 | 2017-11-14T22:12:25 | 2017-11-14T22:12:25 | 110,751,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, HttpResponse, redirect
from datetime import datetime
# the index function is called when root is visited
def index(request):
try:
request.session['words']
except:
request.session['words']=[]
context={
'words':request.session['words']
}
#return HttpResponse(response)
return render(request,'session_words/index.html',context)
def process(request):
try:
request.session['words']
except:
request.session['words']=[]
if request.method=='POST':
try:
request.POST['isBig']
weight='bold'
except:
weight='normal'
newWord={
'word':request.POST['word'],
'color':request.POST['color'],
'weight':weight,
'date':datetime.now().strftime('%H:%M:%S%p, %b %d %Y')
}
request.session['words'].append(newWord)
request.session.modified = True
return redirect('/')
def clear(request):
del request.session['words']
#return HttpResponse(response)
return redirect('/') | UTF-8 | Python | false | false | 1,111 | py | 2 | views.py | 1 | 0.586859 | 0.586859 | 0 | 40 | 26.8 | 66 |
marcjasz/tomograf | 14,559,939,155,290 | fb1bc459bbdd2d6daa990f33b01bd2e3ad3a0e14 | 3bf68ff28a146876f57df0c8ca35170194ab7ff7 | /scanner.py | 09337a6aada4e0951c7430707697989494774617 | []
| no_license | https://github.com/marcjasz/tomograf | 40ef6c6001acb06161d692e9495c62c4953ed615 | ee489ddfb156b5c2f3a7ec5f4996b1333bf6b843 | refs/heads/master | 2021-02-16T15:06:20.401062 | 2020-03-30T18:56:36 | 2020-03-30T18:57:12 | 245,018,768 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from skimage import util
import numpy as np
import math
import functools
def normalize(num, bot, top):
if num > top:
return 1
if num < bot:
return 0
return (num - bot)/(top - bot)
def normalize_photo(photo):
bot, top = np.percentile(photo, (44, 98))
print(bot, top)
res = [[normalize(num, bot, top) for num in x] for x in photo]
return res
class Scanner:
def __init__(self, image, geometry_strategy):
self.image = np.array(image)
self.geometry = geometry_strategy
self.set_dimensions()
# center and radius of the inscribed circle
def set_dimensions(self):
self.width = self.image.shape[0]
self.xc = self.width/2 - 1
self.yc = self.width/2 - 1
self.r = self.width/2 - 2
def set_sampling_params(self, step=None, angle=None, detectors_number=None):
self.step = step
self.angle = angle
self.emitters_number = int(2 / step) - 1
self.detectors_number = detectors_number
@property
@functools.lru_cache()
def positions(self):
positions = []
for rotation in np.linspace(0, math.pi*(2-self.step), self.emitters_number):
positions.append({ 'rotation': rotation,
'samples': self.get_parallel_samples(rotation) })
return positions
def get_parallel_samples(self, rotation):
samples = []
detector_coords = self.get_detector_coords(rotation)[::-1]
emitter_coords = self.get_detector_coords(rotation+math.pi)
for i in range(len(detector_coords)):
line = self.geometry.get_line(*detector_coords[i], *emitter_coords[i])
samples.append({ 'emitter': emitter_coords[i],
'detector': detector_coords[i],
'line': np.array([self.to_plot_coords(coords) for coords in line]),
'value': 0.0 })
return samples
def get_fan_samples(self, rotation):
samples = []
emitter_coords = self.get_emitter(rotation)
for detector_coords in self.get_detector_coords(rotation):
line = self.geometry.get_line(*emitter_coords, *detector_coords)
samples.append({ 'emitter': emitter_coords,
'detector': detector_coords,
'line': np.array([self.to_plot_coords(coords) for coords in line]),
'value': 0.0 })
return samples
def lines(self):
lines = []
for position in self.positions:
lines.extend([self.geometry.get_line(*position['emitter'], *detector) for detector in position['detectors']])
return lines
# relative to the center, so angle is twice as big as at the edge
def get_detector_coords(self, rotation):
angle = self.angle / 2
rotation += math.pi
angles = np.linspace(-angle + rotation, angle + rotation, self.detectors_number)
return [(self.r*math.sin(x), self.r*math.cos(x)) for x in angles]
# relative to the center
def get_emitter(self, rotation):
return (self.r*math.sin(rotation), self.r*math.cos(rotation))
def to_square_img(self):
diff = self.image.shape[0] - self.image.shape[1]
padding = (0,)
if diff > 0:
padding = ((0, 0), (diff//2, diff - diff//2))
elif diff < 0:
padding = ((-math.ceil(diff/2), - diff + math.ceil(diff/2)), (0, 0))
self.image = util.pad(self.image, padding, 'constant')
self.set_dimensions()
return self
def to_plot_coords(self, coords):
return (int(-coords[1]+self.r-1), int(coords[0]+self.r-1))
@functools.lru_cache()
def generate_sinogram(self, steps=None):
res = []
# dla każdego położenia tomografu
for i, position in enumerate(self.positions):
if steps and i > steps:
break
row = []
# dla każdego detektora w obecnym położeniu tomografu
for sample in position['samples']:
# zbierz koordynaty punktów należących do linii między emiterem a detektorem
# weź ich wartości i dodaj do listy ich średnią
values = [self.image[coordsx[0], coordsx[1]] for coordsx in sample['line']]
sample['value'] = np.mean(values)
row.append(sample['value'])
res.append(row)
return np.array(res)
def filter_samples(self, kernel):
for position in self.positions:
values = [sample['value'] for sample in position['samples']]
values = np.convolve(values, kernel)
for i, sample in enumerate(position['samples']):
sample['value'] = values[i]
def inverse_radon_transform(self, steps=None):
# przygotuj sobie tablicę samych zer
res = [[0 for _ in i] for i in self.image]
# wszystko tak samo, tylko dodaj ładnie na każdej linii średnią zamiast zapisywać ją do tablicy
# dla każdego położenia tomografu
for i, position in enumerate(self.positions):
if steps and i > steps:
break
# dla każdego detektora w obecnym położeniu tomografu
for sample in position['samples']:
# zbierz koordynaty punktów należących do linii między emiterem a detektorem
for coordsx in sample['line']:
res[coordsx[0]][coordsx[1]] += sample['value']
return res
| UTF-8 | Python | false | false | 5,597 | py | 4 | scanner.py | 3 | 0.578512 | 0.570428 | 0 | 146 | 37.123288 | 121 |
bloomfieldfong/Evaluador-de-Expresiones | 14,602,888,828,246 | 632d1b6f83dc0ba2b61f63a2ab8a7936ad561543 | 84ccb58e09dc81fe67b923aea749478bc4e67383 | /main.py | 15e6fcfd148f28aeda4aca87376689c305a87981 | []
| no_license | https://github.com/bloomfieldfong/Evaluador-de-Expresiones | 7f225b14941651278210ac3916e1c7889495ab7c | d92843ec90fff3e7574352a45a7d8b681b9fbc7f | refs/heads/master | 2021-01-06T18:31:18.039868 | 2020-04-05T20:19:49 | 2020-04-05T20:19:49 | 241,440,470 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from funciones import *
from operator import itemgetter
from graphviz import Digraph
import os
from thomson_grafic import *
from subconjuntos import *
from directo2 import *
##environment de mi graficadora
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
x = True
while x:
menu = input("1. Contruccion de Thomson \n2. Contruccion de Subconjuntos \n3. Constuccion de de DFA de multiples automatas (Thomson y Subconjuntos) \n4. Metodo directo\n")
##crea un grafo con la contruccion de Thomson
if menu == "1":
##ingrese el exppresion
ingreso = infix_to_postfix(expandir(input("Ingrese una expresion: ")))
i = 0
while i < len(ingreso):
if ingreso[i] == "(":
ingreso.pop(i)
i-=1
i+=1
print(ingreso)
##resultado son los movimientos y infin es el estado inicial y el final
resultado, infin = thomson_grafic(ingreso)
print(infin)
#imprime el automata
impresion(resultado,infin)
##Grafica nuestro automata
graficadora(resultado, infin)
mensaje = input("Ingrese el mensaje que desea saber si pertenece al lenguaje: ")
print(existe2(mensaje, resultado, infin))
##crea un grafo con la contruccion de Thomson y utiliza lols subconjuntos para realizar un dfa
if menu == "2":
##ingrese la expresion
ingreso = infix_to_postfix(expandir(input("Ingrese una expresion: ")))
print(ingreso)
##resultado son los movimientos y infin es el estado inicial y el final
i = 0
while i < len(ingreso):
if ingreso[i] == "(":
ingreso.pop(i)
i-=1
i+=1
resultado, infin = thomson_grafic(ingreso)
tabla, infin_nuevo = dfa_nfa(resultado, infin)
#imprime el automata
impresion(tabla,infin_nuevo)
print(infin_nuevo)
##Grafica nuestro automata
#graficadora(tabla, infin_nuevo)
##mensaje que queremos ver si existe
mensaje = input("Ingrese el mensaje que desea saber si pertenece al lenguaje: ")
print(infin_nuevo)
print(existe(mensaje, tabla, infin_nuevo))
if menu == "3":
## ingresa la cantidad de expresiones que se ingresaran
ingreso_cantidad = int(input("Cuantas expresiones ingresara: "))
resultados = []
infins = []
cantidad = 2
##realiza los automatas
for i in range((ingreso_cantidad)):
ingreso = infix_to_postfix(expandir(input("Ingrese una expresion: ")))
resultado, infin = thomson_grafic(ingreso, cantidad)
resultados.append(resultado)
infins.append(infin[0])
cantidad = infin[0][1] + 1
#nos realiza un automata por todas las
resultado, final = juntar_nfa(resultados, infins)
##nfa a dfa
tabla, infin_nuevo = dfa_nfa(resultado, final)
#impresion de el automata
impresion(tabla, infin_nuevo)
#grafica el automata
graficadora(tabla, infin_nuevo)
##mensaje que queremos ver si existe
mensaje = input("Ingrese el mensaje que desea saber si pertenece al lenguaje: ")
print(existe(mensaje, tabla, infin_nuevo))
if menu == "4":
ingreso = postfix_arbol(expandir(input("Ingrese una expresion: ")))
print(ingreso)
i = 0
while i < len(ingreso):
if ingreso[i] == "(":
ingreso.pop(i)
i-=1
i+=1
tabla,infin = directo(ingreso)
impresion(tabla, [infin])
graficadora(tabla,[infin])
mensaje = input("Ingrese el mensaje que desea saber si pertenece al lenguaje: ")
print(existe(mensaje, tabla, [infin]))
| UTF-8 | Python | false | false | 3,996 | py | 7 | main.py | 6 | 0.579079 | 0.571822 | 0 | 114 | 33.807018 | 176 |
ab3llini/Transformer-VQA | 7,739,531,087,483 | f885fbb6815c1e0c93975514f2a797c6babc47ca | c65674148db2fdefcd0a3a5441f72a9e692ff7c8 | /src/models/vggpt2v2/predict.py | 2cd6b24805ebf391b29e2240d1a0dae62271b635 | [
"MIT"
]
| permissive | https://github.com/ab3llini/Transformer-VQA | 5fe68ab11f1e553e4cd2f9cc14cd1996df615dfb | 46c50fb2748b9d372044d00b901f0cde91946684 | refs/heads/master | 2022-04-28T16:55:31.353689 | 2022-03-15T10:13:37 | 2022-03-15T10:13:37 | 187,615,528 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
this_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(this_path, os.pardir, os.pardir))
sys.path.append(root_path)
from torch.optim import Adam
from utilities.training.trainer import Trainer
from utilities.paths import resources_path
from datasets.light import LightDataset, pad_token
from modules.loss import VisualGPT2Loss
from models.light.model import LightVggGpt2, LightResGpt2, gpt2_tokenizer
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import nltk
def predict(model, dataset, decode_fn, stop_word, max_len, device='cuda:0'):
# Set the model in evaluation mode
model.eval()
model.to(device)
predictions = {}
loader = DataLoader(
dataset=dataset,
shuffle=True,
batch_size=1,
pin_memory=True,
num_workers=4
)
with torch.no_grad():
for it, batch in enumerate(tqdm(loader)):
__id = batch[0]
batch = batch[1:]
answer = []
batch = list(map(lambda tensor: tensor.to(device), batch))
stop_condition = False
its = 0
while not stop_condition:
out = model(*batch)
# Get predicted words in this beam batch
pred = torch.argmax(out[0, -1, :])
eos = (pred.item() in stop_word)
its += 1
stop_condition = eos or its > max_len
if not eos:
# Append the predicted token to the question
batch[0] = torch.cat([batch[0], pred.unsqueeze(0).unsqueeze(0)], dim=1)
# Append the predicted token to the answer
answer.append(pred.item())
predictions[str(__id.item())] = answer
# print('Done after {} => {}->{}'.format(its, __id, gpt2_tokenizer.decode(batch[0].squeeze(0).tolist())))
# print('What was saved to the prediction out > {}'.format(predictions[str(__id.item())]))
# print('After decode > {}'.format(gpt2_tokenizer.decode(predictions[str(__id.item())])))
# print('After custom decode fn > {}'.format(decode_fn(predictions[str(__id.item())])))
if decode_fn:
print('Decoding & NLTK encoding predictions with the provided tokenizer..')
predictions = dict(map(lambda item: (item[0], decode_fn(item[1])), predictions.items()))
return predictions
def nltk_decode_light_fn(pred):
try:
return nltk.word_tokenize(gpt2_tokenizer.decode(pred))
except Exception as e:
print('Exception while trying to decode {}.. Returning an empty string..'.format(pred))
return ''
if __name__ == '__main__':
model = LightVggGpt2()
model.load_state_dict(
torch.load(resources_path(
os.path.join('models', 'light', 'vgg-gpt2', 'checkpoints', 'latest', 'B_124_LR_5e-05_CHKP_EPOCH_19.pth'))))
dataset = LightDataset(resources_path(os.path.join('models', 'light', 'vgg-gpt2', 'data')), split='testing',
evaluating=True)
predict(model, dataset, decode_fn=nltk_decode_light_fn, max_len=20, stop_word=[gpt2_tokenizer.eos_token_id])
| UTF-8 | Python | false | false | 3,229 | py | 102 | predict.py | 57 | 0.602973 | 0.590895 | 0 | 91 | 34.483516 | 119 |
amrithajayadev/misc | 14,087,492,738,912 | c02c9aefd0c555e63561514cfb06ee8b93cee52c | dd5c06ab6f51a2ae2890eb604bb83f1d1a1ba8d3 | /recursion/permutation_case_change.py | f08b44f5134a41c6b2134c3490c02d94cb609a7c | []
| no_license | https://github.com/amrithajayadev/misc | 0c3995eb6f4748d803471fb3da693f23fa886ba5 | c316d85b639b53ebe9f6cd7c162a6f4287a431b4 | refs/heads/main | 2023-04-02T02:36:51.428221 | 2023-03-31T03:23:35 | 2023-03-31T03:23:35 | 304,224,989 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | inp = "a1B2"
diff = ord('a') - ord('A')
def generate(inp, i, out):
if i == len(inp):
print(out, end=" ")
return
generate(inp, i + 1, out + inp[i])
if ord('A') <= ord(inp[i]) < ord('a'):
case_change = chr(ord(inp[i]) + diff)
elif ord(inp[i]) >= ord('a'):
case_change = chr(ord(inp[i]) - diff)
else:
case_change = inp[i]
generate(inp, i + 1, out + case_change)
generate(inp, 0, "")
print("\n")
def generate2(inp, i, out):
if i == len(inp):
print(out, end=" ")
return
generate(inp, i + 1, out + inp[i].lower())
generate(inp, i + 1, out + inp[i].upper())
generate2(inp, 0, "")
| UTF-8 | Python | false | false | 675 | py | 200 | permutation_case_change.py | 199 | 0.493333 | 0.478519 | 0 | 33 | 19.454545 | 46 |
gitter-badger/magma | 7,945,689,535,466 | bbb1c65f79d5aa21d90d93b769cb0991cf1b62f1 | 8247ccd2a32a353f12d8f067aeae6c4c085665e9 | /magma/fromverilog.py | b8729e9d466ee2f7bbeeaf618b8f9b80c3f0fe1f | [
"MIT"
]
| permissive | https://github.com/gitter-badger/magma | 7ae0b511f24020d209923fdb8e448fc579ba59fa | c2f9eedffb9a921c9cefb0c742254e3adb522034 | refs/heads/master | 2020-08-14T05:53:03.005786 | 2019-10-14T17:18:03 | 2019-10-14T17:18:03 | 215,109,656 | 0 | 0 | NOASSERTION | true | 2019-10-14T17:50:49 | 2019-10-14T17:50:49 | 2019-10-14T17:18:06 | 2019-10-14T17:18:04 | 12,467 | 0 | 0 | 0 | null | false | false | from __future__ import absolute_import
from __future__ import print_function
from collections import namedtuple, OrderedDict
from mako.template import Template
from pyverilog.vparser.parser import VerilogParser, Node, Input, Output, ModuleDef, Ioport, Port, Decl
import pyverilog.vparser.parser as parser
from pyverilog.dataflow.visit import NodeVisitor
import pyverilog.vparser.ast as pyverilog_ast
from .t import In, Out, InOut
from .bit import Bit, _BitKind
from .bits import Bits, BitsKind
from .circuit import DeclareCircuit, DefineCircuit, EndDefine
from .passes.tsort import tsort
import logging
logger = logging.getLogger('magma').getChild('from_verilog')
__all__ = ['DeclareFromVerilog']
__all__ += ['DeclareFromVerilogFile']
__all__ += ['DeclareFromTemplatedVerilog']
__all__ += ['DeclareFromTemplatedVerilogFile']
__all__ += ['DefineFromVerilog']
__all__ += ['DefineFromVerilogFile']
__all__ += ['DefineFromTemplatedVerilog']
__all__ += ['DefineFromTemplatedVerilogFile']
class ModuleVisitor(NodeVisitor):
def __init__(self, shallow):
self.__shallow = shallow
self.defns = OrderedDict()
self.__defn_stack = []
self.__instances = {}
def visit_ModuleDef(self, defn):
if defn.name in self.defns:
raise Exception(f"Defn with name {defn.name} appears twice")
self.defns[defn.name] = defn
if self.__shallow:
return defn
# Collect instances in this definition.
self.__instances[defn] = set()
self.__defn_stack.append(defn)
self.generic_visit(defn)
self.__defn_stack.pop()
return defn
def visit_Instance(self, instance):
if self.__shallow:
return instance
defn = self.__defn_stack[-1]
assert instance not in self.__instances[defn]
self.__instances[defn].add(instance)
return instance
def get_instances(self, defn):
return self.__instances[defn]
def sort(self):
graph = []
for defn in self.defns.values():
insts = [inst.module for inst in self.get_instances(defn)]
graph.append((defn.name, insts))
sorted_ = tsort(graph)
defns = OrderedDict()
for defn_name, _ in sorted_:
defns[defn_name] = self.defns[defn_name]
self.defns = defns
def convert(input_type, target_type):
if isinstance(input_type, _BitKind) and \
isinstance(target_type, _BitKind) and \
input_type.direction == target_type.direction:
return target_type
if isinstance(input_type, BitsKind) and \
isinstance(target_type, BitsKind) and \
input_type.N == target_type.N and \
input_type.T.direction == target_type.T.direction:
return target_type
raise NotImplementedError(f"Conversion between {input_type} and "
f"{target_type} not supported")
def get_value(v, param_map):
if isinstance(v, pyverilog_ast.IntConst):
return int(v.value)
if isinstance(v, pyverilog_ast.Rvalue):
return get_value(v.var, param_map)
if isinstance(v, (pyverilog_ast.Minus, pyverilog_ast.Uminus)):
return get_value(v.left, param_map) - get_value(v.right, param_map)
if isinstance(v, pyverilog_ast.Plus):
return get_value(v.left, param_map) + get_value(v.right, param_map)
if isinstance(v, pyverilog_ast.Identifier):
return param_map[v.name]
else:
raise NotImplementedError(type(v))
def get_type(io, type_map, param_map):
if isinstance(io, Input):
direction = In
elif isinstance(io, Output):
direction = Out
else:
direction = InOut
if io.width is None:
type_ = Bit
else:
msb = get_value(io.width.msb, param_map)
lsb = get_value(io.width.lsb, param_map)
type_ = Bits[msb-lsb+1]
type_ = direction(type_)
if io.name in type_map:
type_ = convert(type_, type_map[io.name])
return type_
def ParseVerilogModule(node, type_map):
args = []
ports = []
param_map = {}
for param in node.paramlist.params:
for p in param.list:
param_map[p.name] = get_value(p.value, param_map)
for port in node.portlist.ports:
if isinstance(port, Ioport):
io = port.first
args.append(io.name)
args.append(get_type(io, type_map, param_map))
elif isinstance(port, Port):
ports.append(port.name)
else:
raise NotImplementedError(type(port))
if ports:
assert not args, "Can we have mixed declared and undeclared types in a Verilog module?"
for port in ports:
found = False
for child in node.children():
if isinstance(child, Decl):
for sub_child in child.children():
if isinstance(sub_child, (parser.Input, parser.Output, parser.Inout)) and \
sub_child.name == port:
args.append(sub_child.name)
args.append(get_type(sub_child, type_map, param_map))
found = True
break
if found:
break
else:
raise Exception(f"Could not find type declaration for port {port}")
return node.name, args
def FromVerilog(source, func, type_map, target_modules=None, shallow=False,
                external_modules=None):
    """Parse Verilog source text and construct magma circuits from it.

    Parameters:
        source: Verilog source text.
        func: circuit constructor, either DeclareCircuit or DefineCircuit.
        type_map: mapping from port name to a magma type override.
        target_modules: optional list of module names to return; None
            returns every module parsed from the source.
        shallow: if True, do not wire up module instances, and attach the
            full source (rather than a line slice) to each definition.
        external_modules: mapping of module name to an already-constructed
            magma definition, used to resolve instances without parsing.

    Returns a list of magma definitions for the parsed modules.
    """
    # None default avoids sharing one mutable dict across calls (B006).
    if external_modules is None:
        external_modules = {}
    parser = VerilogParser()
    ast = parser.parse(source)
    visitor = ModuleVisitor(shallow)
    visitor.visit(ast)
    if not shallow:
        visitor.sort()

    def _get_lines(start_line, end_line):
        # In shallow mode the whole source is attached; otherwise slice out
        # the 1-indexed inclusive [start_line, end_line] range.
        if shallow:
            return source
        lines = source.split("\n")
        return "\n".join(lines[start_line - 1:end_line])

    if not external_modules.keys().isdisjoint(visitor.defns.keys()):
        intersection = external_modules.keys() & visitor.defns.keys()
        raise Exception(f"Modules defined in both external_modules and in "
                        f"parsed verilog: {intersection}")
    magma_defns = external_modules.copy()
    for name, verilog_defn in visitor.defns.items():
        parsed_name, args = ParseVerilogModule(verilog_defn, type_map)
        assert parsed_name == name
        magma_defn = func(name, *args)
        if func == DefineCircuit:
            # Attach relevant lines of verilog source.
            magma_defn.verilogFile = _get_lines(
                verilog_defn.lineno, verilog_defn.end_lineno)
            if not shallow:
                for instance in visitor.get_instances(verilog_defn):
                    instance_defn = magma_defns[instance.module]
                    instance_defn()
            EndDefine()
        magma_defn.verilog_source = source
        magma_defns[name] = magma_defn
    if not visitor.defns:
        # Count only modules parsed from the source; entries supplied via
        # external_modules must not suppress this warning.
        logger.warning(f"Did not import any modules from verilog, either could "
                       f"not parse or could not find any of the target_modules "
                       f"({target_modules})")
    # Filter back out external modules.
    magma_defns = {name: magma_defns[name] for name in visitor.defns}
    if target_modules is None:
        return list(magma_defns.values())
    # Filter modules based on target_modules list.
    return [v for k, v in magma_defns.items() if k in target_modules]
def FromVerilogFile(file, func, type_map, target_modules=None, shallow=False,
                    external_modules=None):
    """Read a Verilog file and construct magma circuits from its contents.

    Returns None if ``file`` is None, otherwise the list of definitions
    produced by FromVerilog. Each definition is tagged with the source
    file name.
    """
    if file is None:
        return None
    # Close the file handle deterministically instead of leaking it.
    with open(file) as f:
        verilog = f.read()
    result = FromVerilog(verilog, func, type_map, target_modules, shallow,
                         external_modules if external_modules is not None else {})
    # Store the original verilog file name, currently used by m.compile to
    # generate a .sv when compiling a circuit that was defined from a verilog
    # file
    for item in result:
        item.verilog_file_name = file
    return result
def FromTemplatedVerilog(templatedverilog, func, type_map, **kwargs):
    """Render a templated Verilog string, then construct magma circuits from it.

    ``kwargs`` are forwarded to the template renderer as substitutions.
    """
    rendered = Template(templatedverilog).render(**kwargs)
    return FromVerilog(rendered, func, type_map)
def FromTemplatedVerilogFile(file, func, type_map, **kwargs):
    """Read a templated Verilog file and construct magma circuits from it.

    Returns None if ``file`` is None.
    """
    if file is None:
        return None
    # Close the file handle deterministically instead of leaking it.
    with open(file) as f:
        templatedverilog = f.read()
    return FromTemplatedVerilog(templatedverilog, func, type_map, **kwargs)
def DeclareFromVerilog(source, type_map=None):
    """Declare (interface only) magma circuits from a Verilog source string."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromVerilog(source, DeclareCircuit,
                       type_map if type_map is not None else {})
def DeclareFromVerilogFile(file, target_modules=None, type_map=None):
    """Declare (interface only) magma circuits from a Verilog file."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromVerilogFile(file, DeclareCircuit,
                           type_map if type_map is not None else {},
                           target_modules)
def DeclareFromTemplatedVerilog(source, type_map=None, **kwargs):
    """Declare (interface only) magma circuits from templated Verilog source."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromTemplatedVerilog(source, DeclareCircuit,
                                type_map if type_map is not None else {},
                                **kwargs)
def DeclareFromTemplatedVerilogFile(file, type_map=None, **kwargs):
    """Declare (interface only) magma circuits from a templated Verilog file."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromTemplatedVerilogFile(file, DeclareCircuit,
                                    type_map if type_map is not None else {},
                                    **kwargs)
def DefineFromVerilog(source, type_map=None, target_modules=None, shallow=False,
                      external_modules=None):
    """Define (with implementation) magma circuits from a Verilog source string."""
    # None defaults avoid sharing mutable dicts between calls (B006).
    return FromVerilog(source, DefineCircuit,
                       type_map if type_map is not None else {},
                       target_modules, shallow=shallow,
                       external_modules=(external_modules
                                         if external_modules is not None
                                         else {}))
def DefineFromVerilogFile(file, target_modules=None, type_map=None, shallow=False,
                          external_modules=None):
    """Define (with implementation) magma circuits from a Verilog file."""
    # None defaults avoid sharing mutable dicts between calls (B006).
    return FromVerilogFile(file, DefineCircuit,
                           type_map if type_map is not None else {},
                           target_modules, shallow=shallow,
                           external_modules=(external_modules
                                             if external_modules is not None
                                             else {}))
def DefineFromTemplatedVerilog(source, type_map=None, **kwargs):
    """Define (with implementation) magma circuits from templated Verilog source."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromTemplatedVerilog(source, DefineCircuit,
                                type_map if type_map is not None else {},
                                **kwargs)
def DefineFromTemplatedVerilogFile(file, type_map=None, **kwargs):
    """Define (with implementation) magma circuits from a templated Verilog file."""
    # None default avoids sharing a mutable dict between calls (B006).
    return FromTemplatedVerilogFile(file, DefineCircuit,
                                    type_map if type_map is not None else {},
                                    **kwargs)
| UTF-8 | Python | false | false | 9,657 | py | 154 | fromverilog.py | 98 | 0.635498 | 0.635083 | 0 | 261 | 36 | 102 |
nygeog/osgeo | 14,826,227,117,707 | 61db22b0b586a8c4e175e3d20160544d511d2147 | 015e6357c995d45716de2aa6fb5676a76b1e4b9a | /shapely_fiona_intersect/intersect.py | 77056ce8d2ac9e00e9b88730a991eb2783bc4c86 | []
| no_license | https://github.com/nygeog/osgeo | 7603e6671fd3b691f0a3d36862d36fe3aa3ff8a0 | f3e2a3c3069f36957862fe62bb01cebd9d0245a3 | refs/heads/master | 2021-01-18T21:16:43.585187 | 2016-06-28T13:12:10 | 2016-06-28T13:12:10 | 27,847,521 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import fiona
from shapely.geometry import shape
from copy import deepcopy
with fiona.open("planning_neighborhoods.shp", "r") as n:
with fiona.open("Schools_Private_Pt.shp", "r") as s:
# create a schema for the attributes
outSchema = deepcopy(s.schema)
outSchema['properties'].update(n.schema['properties'])
with fiona.open ("Schools_withNbhd.shp", "w", s.driver, outSchema, s.crs) as output:
for school in s:
for neighborhood in n:
# check if point is in polygon and set attribute
if shape(school['geometry']).within(shape(neighborhood['geometry'])):
school['properties']['neighborho'] = neighborhood['properties']['neighborho']
# write out
output.write({
'properties': school['properties'],
'geometry': school['geometry']
}) | UTF-8 | Python | false | false | 1,022 | py | 18 | intersect.py | 6 | 0.528376 | 0.528376 | 0 | 24 | 41.625 | 102 |
Graeberj/saidit | 137,438,974,152 | 7fe5827f58875ecd7d2ea3587e912a86e27ea235 | a9a074c24dc0300e769053755c5e1937e13cb2ac | /posts/models.py | 1f9d650f7ea7c26d05b0496b9257a2a62e29fcdd | []
| no_license | https://github.com/Graeberj/saidit | d86a7a7c6aaa0e592f6d06f295de948c909b1c19 | 7890d1fe76dde6ada7f6d251fafb40feb5000eac | refs/heads/main | 2023-08-31T08:20:10.786030 | 2021-10-20T03:53:54 | 2021-10-20T03:53:54 | 413,550,262 | 0 | 1 | null | false | 2021-10-19T22:08:54 | 2021-10-04T19:04:08 | 2021-10-19T20:06:52 | 2021-10-19T22:08:53 | 1,887 | 0 | 0 | 0 | HTML | false | false | from django.db import models
from saidituser.models import SaidItUser
from django.utils import timezone
from group.models import SubGroup
from django.contrib.auth.models import User
class Post(models.Model):
user = models.ForeignKey(SaidItUser, on_delete=models.CASCADE)
posted_in = models.ForeignKey(SubGroup, on_delete=models.CASCADE)
body = models.CharField(max_length=150)
time_created = models.DateTimeField(default=timezone.now)
like_dislike = models.BooleanField(default=False, choices=(
(True, 'like'), (False, 'dislike')))
like_count = models.IntegerField(default=0)
dislike_count = models.IntegerField(default=0)
# rate = models.ManyToManyField('', symmetrical=False, blank=True, related_name='+')
# notification
class Meta:
ordering = ["-time_created"]
def __str__(self):
return self.body
| UTF-8 | Python | false | false | 900 | py | 36 | models.py | 20 | 0.693333 | 0.687778 | 0 | 23 | 36.869565 | 88 |
akbarmenglimuratov/django-qa | 15,049,565,454,012 | f60c7e1948a8dca9b73c735a9e971a84b86876de | 87497377bc615360f735dc7742fb911f786a66d2 | /accounts/migrations/0010_auto_20190301_1955.py | e4e645254aa916bcbf525c9203ecc140055284f6 | []
| no_license | https://github.com/akbarmenglimuratov/django-qa | dd035aeae335883acf82306e7cb2129c6c3aa8a5 | 994700ca2a3b7d446ff4bca7bdd6e0c0b70b3c36 | refs/heads/master | 2020-05-09T16:31:41.761382 | 2020-03-03T17:18:14 | 2020-03-03T17:18:14 | 181,272,934 | 0 | 0 | null | false | 2020-06-05T20:22:18 | 2019-04-14T07:12:26 | 2020-03-03T17:18:42 | 2020-06-05T20:22:16 | 4,075 | 0 | 0 | 3 | Python | false | false | # Generated by Django 2.1.5 on 2019-03-01 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0009_user_data_image'),
]
operations = [
migrations.AlterField(
model_name='user_data',
name='fav_question',
field=models.ManyToManyField(blank=True, related_name='fav_question', to='questions.Question'),
),
migrations.AlterField(
model_name='user_data',
name='question',
field=models.ManyToManyField(blank=True, to='questions.Question'),
),
]
| UTF-8 | Python | false | false | 636 | py | 46 | 0010_auto_20190301_1955.py | 24 | 0.595912 | 0.566038 | 0 | 23 | 26.652174 | 107 |
xiaobogaga/littletalk | 16,956,530,897,227 | 796e544c3ff82f91dd24b7727b0d99839d848d35 | 443bd9a2e6085fb466d50b1ce44a8ad5e76be129 | /com/tomzhu/util/testDictJson.py | 452730c199e02a033d9a139d673eae20002e0c18 | []
| no_license | https://github.com/xiaobogaga/littletalk | b01a3679233aa8c34a2c7fb946a82acc18331ea0 | cce1c1ab048f43d8fa85e0e2f1b5e2a22acc0bf3 | refs/heads/master | 2018-02-08T03:59:32.286533 | 2017-07-08T02:57:33 | 2017-07-08T02:57:33 | 96,375,059 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pickle
if __name__ == "__main__":
data = open("d://ques_voca_vec.bin" , "rb")
ques = pickle.load(data)
print(ques["我们"])
data.close()
| UTF-8 | Python | false | false | 185 | py | 13 | testDictJson.py | 12 | 0.552486 | 0.552486 | 0 | 9 | 19.111111 | 47 |
beevageeva/manchaNew | 2,336,462,248,328 | 1b64fa72b106bb6d19221c6f30bcad0c716b1ebb | 2526d7fb40388a663cd8d4d099b33ef11e47bc33 | /mancha_src/sample_tests/Acoustic_Wave3-2fl/create_equi-2fluids-equal.py | 051e7e94c2bd14f77c4dd1a5ddb45146792bcaf4 | []
| no_license | https://github.com/beevageeva/manchaNew | fbac3aba2aeda04f78ccd3f81521c24cfff4b166 | 74b42721bb64378556b017a12b74edab2f8bc1a6 | refs/heads/master | 2016-08-31T04:12:50.017968 | 2016-08-28T15:36:38 | 2016-08-28T15:36:38 | 50,166,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import h5py
from math import pi, sqrt
#make sure filename has h5 extension otherwise visit won't recognize it!
eqfilename = "eqmytestfile.h5"
pfilename = "pmytestfile.h5"
mx = 100
my = 100
mz = 100
rho0 = 1.0
pe0 = 2e-4
Te0 = 6000
mu0 = 1.0
mH = 1.66e-27 #kg
kB = 1.38e-23 #m2 kg s-2 K-1
Lx = 1.275e6
rho0 = pe0 * mu0 * (mH / kB) / Te0
pe0=1.0
rho0=1.0
Lx = 10.0
print(rho0)
Ly = Lx
Lz = Lx
dx = Lx/mx
dy = Ly/my
dz = Lz/mz
A = 0.004
gamma = 1.4
k = 2
cs0 = sqrt(gamma*pe0/rho0)
x = np.linspace(0,Lx, mx)
y = np.linspace(0,Ly, my)
z = np.linspace(0,Lz, mz)
zz, yy, xx = np.meshgrid(x,y,z, indexing='ij')
zero = np.zeros((mx,my,mz))
#pert z
pert = np.sin(zz * 2*pi*k/ Lx ) * A
presPert = gamma * pe0* pert
rhoPert = rho0 * pert
vzPert = cs0 * pert
vxPert = np.zeros((mx,my,mz))
vyPert = vxPert
with h5py.File(eqfilename, 'w') as f:
f.attrs['cellsize'] = [dx,dy,dz]
f.attrs['time'] = 0
f.attrs['metadata'] = [3,mx,my,mz,0]
#!!! lwz compression does not seem to work, do no use it
dset = f.create_dataset("pe_n", (mx, my, mz), dtype='f8', data=zero+pe0, chunks=True, shuffle=True)
dset = f.create_dataset("rho_n", (mx, my, mz), dtype='f8', data=zero+rho0, chunks=True, shuffle=True)
dset = f.create_dataset("pe_c", (mx, my, mz), dtype='f8', data=zero+2*pe0, chunks=True, shuffle=True)
dset = f.create_dataset("rho_c", (mx, my, mz), dtype='f8', data=zero+rho0, chunks=True, shuffle=True)
dset = f.create_dataset("bx", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
dset = f.create_dataset("by", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
dset = f.create_dataset("bz", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
with h5py.File(pfilename, 'w') as f:
f.attrs['cellsize'] = [dx,dy,dz]
f.attrs['time'] = 0
f.attrs['metadata'] = [3,mx,my,mz,0]
#!!! lwz compression does not seem to work, do no use it
dset = f.create_dataset("pe_n", (mx, my, mz), dtype='f8', data=presPert, chunks=True, shuffle=True)
dset = f.create_dataset("rho_n", (mx, my, mz), dtype='f8', data=rhoPert, chunks=True, shuffle=True)
dset = f.create_dataset("vx_n", (mx, my, mz), dtype='f8', data=vxPert, chunks=True, shuffle=True)
dset = f.create_dataset("vy_n", (mx, my, mz), dtype='f8', data=vyPert, chunks=True, shuffle=True)
dset = f.create_dataset("vz_n", (mx, my, mz), dtype='f8', data=vzPert, chunks=True, shuffle=True)
dset = f.create_dataset("pe_c", (mx, my, mz), dtype='f8', data=2*presPert, chunks=True, shuffle=True)
dset = f.create_dataset("rho_c", (mx, my, mz), dtype='f8', data=rhoPert, chunks=True, shuffle=True)
dset = f.create_dataset("vx_c", (mx, my, mz), dtype='f8', data=vxPert, chunks=True, shuffle=True)
dset = f.create_dataset("vy_c", (mx, my, mz), dtype='f8', data=vyPert, chunks=True, shuffle=True)
dset = f.create_dataset("vz_c", (mx, my, mz), dtype='f8', data=vzPert, chunks=True, shuffle=True)
dset = f.create_dataset("bx", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
dset = f.create_dataset("by", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
dset = f.create_dataset("bz", (mx, my, mz), dtype='f8', data=zero, chunks=True, shuffle=True)
| UTF-8 | Python | false | false | 3,253 | py | 117 | create_equi-2fluids-equal.py | 41 | 0.632339 | 0.598524 | 0 | 103 | 30.563107 | 104 |
OppiHmiK/Python_Study | 19,404,662,273,012 | 0f9d1133bd0a6b803e1309bb1d1a9bbf282ffe00 | b2bc6bf4aa384382be95a3d7cc9d334ccce0ee42 | /Study_from_Windows/Vanilla/Pickle/pickle_1.py | d190c93272243da92d2bf9c544c4e3dab990bde6 | []
| no_license | https://github.com/OppiHmiK/Python_Study | 24de12a99956779abfe43ea10b9cec1c75273517 | 5e5784f332c7e152c879f3465cdd28f2e9f7d921 | refs/heads/master | 2021-09-11T09:06:19.104869 | 2021-09-02T00:12:49 | 2021-09-02T00:12:49 | 194,117,175 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding : utf-8 -*-
import pickle as pck
lis = ['a', 'b', 'c']
with open('test.txt', 'wb') as f:
pck.dump(lis, f)
| UTF-8 | Python | false | false | 136 | py | 190 | pickle_1.py | 119 | 0.470588 | 0.463235 | 0 | 7 | 16.857143 | 33 |
Bhagyarsh/mogambo | 16,990,890,646,603 | 718acd7dce0c270552e0c68a655724529eaa3ad3 | b2241942315a8e9ec96abdfc3572a889a91d2208 | /mogambo/SoftwareData/api/API_TEST/t.py | fd57b05ef2ff091be2075b4d2e04479b5b1eba2e | []
| no_license | https://github.com/Bhagyarsh/mogambo | c9bf1174aca31955accb109c0df7ee2292172743 | 7e00fcadd9c6111a9ac0ef429920ea4e4f018428 | refs/heads/master | 2023-01-14T07:29:55.808968 | 2019-02-03T05:36:15 | 2019-02-03T05:36:15 | 164,221,549 | 2 | 0 | null | false | 2023-01-04T20:56:59 | 2019-01-05T14:22:08 | 2019-10-19T11:14:32 | 2023-01-04T20:56:55 | 2,489 | 1 | 0 | 52 | Python | false | false | import base64
import json
img = json.dumps(base64.b64encode(open('inkscape.jpg', 'rb').read()))
print(img) | UTF-8 | Python | false | false | 106 | py | 45 | t.py | 32 | 0.735849 | 0.679245 | 0 | 4 | 25.75 | 69 |
caifi2/scikitLearn | 6,167,573,050,744 | 15a7c9c1ffb84be661c454fc34786065b706bb36 | e61a60542f4b1b1abca3c89db691a91c34f2c860 | /numpy_pandas/GridSearchCV_VS_RandomizedSearchCV.py | 258f5b28f5cb7c5a43dd6a28ce3e9409a5a54b5a | []
| no_license | https://github.com/caifi2/scikitLearn | 28cbe5081d1cc312f49a1403febebcb6978c8e7d | 71e1b95d1d509063bbeffe115ebc005299f42ee5 | refs/heads/master | 2018-09-19T10:35:39.541782 | 2017-06-13T07:40:13 | 2017-06-13T07:40:13 | 93,704,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
from scipy.stats import randint as sp_tandint
from sklearn.datasets import load_digits
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import numpy as np
#网格搜索 VS 随机搜索
#选取最优参数
def report(results, n_top = 3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print('Model with rank: {0}'.format(i))
print('Mean validation score:{0:.3f}(std:{1:.3f})'.format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]
))
print("Parameters:{0}".format(results['params'][candidate]))
print("")
#取得数据,手写字符分类
digits = load_digits()
X, y = digits.data, digits.target
#构建一个分类器
clf = RandomForestClassifier(n_estimators=20)
print('======RandomizedSearchCV result========')
#设置想要优化的超参数以及他们的取值分布
param_dist = {
"max_depth": [3, None],
"max_features": sp_tandint(1, 11),
"min_samples_split": sp_tandint(2, 11),
"min_samples_leaf": sp_tandint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
}
#开启超参数空间的随机搜索
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates parameter settings."%((time() - start), n_iter_search))
report(random_search.cv_results_)
print('======GridSearchCV result========')
param_grid = {
"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
}
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidates parameter settings."%((time() - start), len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
| UTF-8 | Python | false | false | 2,255 | py | 40 | GridSearchCV_VS_RandomizedSearchCV.py | 35 | 0.63504 | 0.617191 | 0 | 61 | 32.901639 | 136 |
gouvina/aprendaut2019 | 4,930,622,484,282 | 0479b52e95ccc858140a6ca255397bd2d6fa5ad9 | a373cd328800dc333de480820ef94947e16d039e | /lab4/processing/parser.py | b094a4f8041d3f61680df0748a4979f652ca849f | []
| no_license | https://github.com/gouvina/aprendaut2019 | 3d0e2c42142a95c43a9f7748781f2db1991f9477 | 8db885586c3c3e1883925fe240ae7e6a89fa7110 | refs/heads/master | 2020-04-25T19:12:16.384053 | 2019-12-02T14:39:08 | 2019-12-02T14:39:08 | 173,011,479 | 1 | 0 | null | false | 2019-12-02T14:39:09 | 2019-02-28T00:21:45 | 2019-11-05T01:17:20 | 2019-12-02T14:39:08 | 70,944 | 2 | 0 | 0 | Jupyter Notebook | false | false | ### DEPENDENCIAS
### ------------------
import numpy as np
from utils.const import CandidateDivision
### METODOS PRINCIPALES
### -------------------
# Dada una lista de candidatos, devuelve sus respectivos partidos
# Se retorna una lista de tuplas (id, nombre, candidatos) para los partidos
# Y una lista de partidos asignados para cada candidato en 'candidates'
def parseCandidates(candidates, partyJSON):
parties = np.zeros(len(candidates), dtype = int)
# Preprocesamiento
pairs = []
for i in range(0, len(partyJSON)):
partyCandidates = []
for candidate in partyJSON[i]['candidates']:
partyCandidates.append(candidate['id'])
pairs.append((i, partyJSON[i]['party'], partyCandidates))
# Sustitucion
index = 0
for candidate in candidates:
for party, partyName, partyCandidates in pairs:
if candidate in partyCandidates:
parties[index] = party
index += 1
break
return pairs, parties
# El proceso inverso a la función anterior
def parseCandidatesFromParties(candidatesJSON, candidates):
auxDict = {}
for i in range(0, len(candidatesJSON)):
auxDict[candidatesJSON[i]['id']] = candidatesJSON[i]['name']
res = []
for candidate in candidates:
if not candidate in auxDict.keys():
res.append((candidate, 'Candidato desconocido'))
else:
res.append((candidate, auxDict[candidate]))
return res
# Dado un candidato, devuelve su partido en caso de no encontrarse en parsedParties
def getParty(parsedParties, candidate, division):
if division == CandidateDivision.PARTIES:
if candidate == 7: # Partido Nacional???
return 6 # Partido Nacional
if candidate == 30: # ??????
return 0 # Frente Amplio
elif division == CandidateDivision.SPECTRUM:
if candidate == 7: # Partido Nacional???
return 2 # Derecha
if candidate == 30: # ??????
return 0 # Izquierda
elif division == CandidateDivision.DUAL_SPECTRUM:
if candidate == 7: # Partido Nacional???
return 1 # Derecha
if candidate == 30: # ??????
return 0 # Izquierda
elif division == CandidateDivision.NOLAN:
if candidate == 7: # Partido Nacional???
return 3 # Conservadurismo
if candidate == 30: # ??????
return 0 # Progresismo
for party, partyName, partyCandidates, partyCount in parsedParties:
if candidate in partyCandidates:
return party | UTF-8 | Python | false | false | 2,595 | py | 332 | parser.py | 64 | 0.617579 | 0.608327 | 0 | 78 | 32.269231 | 83 |
AK-1121/code_extraction | 816,043,787,626 | 3028d8bab52be76927f5403c3491a6eb5c25b105 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27822.py | 7f88a597a5e680ef1da04f553dfeb58ec0d5bbbd | []
| no_license | https://github.com/AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # how to add element to json list - python
data["list"].append({'b':'2'})
| UTF-8 | Python | false | false | 74 | py | 29,367 | python_27822.py | 29,367 | 0.635135 | 0.621622 | 0 | 2 | 36 | 42 |
Marstaable/Repo_warsztaty | 14,130,442,415,321 | 9ad19132b042873b1a05052956009e39c8e4971e | f187ce976dbd2eaad83183f146bc6699e03561a3 | /week5/fabryka.py | 1472a609ba97c1e5006f9956b918762aa5cae394 | []
| no_license | https://github.com/Marstaable/Repo_warsztaty | e3e43b96ca2738b941e84f525653d01f6e8a5b46 | b2f973dcab85034e44d9719bfdd915016bbb678d | refs/heads/master | 2020-03-29T23:02:54.926387 | 2019-02-18T08:21:35 | 2019-02-18T08:21:35 | 150,453,671 | 0 | 0 | null | false | 2019-02-18T08:21:36 | 2018-09-26T16:04:35 | 2018-10-24T17:02:48 | 2019-02-18T08:21:36 | 1,604 | 0 | 0 | 0 | Python | false | null | from week5.samochod import Samochod
from week5.silnik import Silnik
# samochod ma miec silnik :P
silnik = Silnik(1.7,2000)
samochod1 = Samochod("czerwony","opel","astra",silnik)
# samochod2 = Samochod("zielony","VW","Najnowszy:P")
silnik = Silnik(5.4, 1000)
samochod2 = Samochod("zielony","VW","Najnowszy",silnik)
# silnik1 = Silnik(1.7,2000)
# silnik2 = Silnik(5.4,10000)
# def porownaj():
# # if samochod1.silnik.ilosc_koni > samochod2.silnik.ilosc_koni:
# # print("samochod1")
# # elif samochod2.silnik.ilosc_koni > samochod1.silnik.ilosc_koni:
# # print("samochod2")
# # else:
# # print("Błąd - Sprobuj jeszcze raz")
# #
# # porownaj()
# fromsrting
# tostring
def ktory_szybszy(s1,s2):
if s1.silnik.ilosc_koni>s2.silnik.ilosc_koni:
return s1
else:
return s2 | UTF-8 | Python | false | false | 827 | py | 74 | fabryka.py | 65 | 0.658182 | 0.604848 | 0 | 30 | 26.533333 | 71 |
Mistyhops/task_D16 | 13,280,038,910,025 | 26b42520ed6fdfc14b5c1984d715e29104c90f3d | f62ae2ec9c49366642397acca8d043baa8ce2e30 | /project/announcements/tasks.py | a8482e2ea1258e315a7183414752a6b7baace82f | []
| no_license | https://github.com/Mistyhops/task_D16 | 50a54e638222152e7b1de3feda96e42158636cbb | 68266bb6bf0ef5e3b458195bddd674497d220e35 | refs/heads/master | 2023-08-04T18:02:10.187911 | 2021-09-14T22:28:06 | 2021-09-14T22:28:06 | 405,493,060 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from celery import shared_task
from django.template.loader import render_to_string
from django.core.mail import get_connection, EmailMultiAlternatives
from django.conf import settings
from accounts.models import CustomUser
from .services import get_announcement_list_for_last_week_for_selected_user
@shared_task
def regular_subscribers_email_newsletter():
connection = get_connection()
connection.open()
messages = []
for user in CustomUser.objects.all():
if get_announcement_list_for_last_week_for_selected_user(user):
html_content = render_to_string(
'email/weekly_email_newsletter.html',
{
'username': user.username,
'announcement_list': get_announcement_list_for_last_week_for_selected_user(user)['announcement_list']
}
)
msg = EmailMultiAlternatives(
subject='',
body='',
from_email=settings.DEFAULT_FROM_EMAIL,
to=[user.email],
connection=connection,
)
msg.attach_alternative(html_content, 'text/html')
messages.append(msg)
connection.send_messages(messages)
connection.close()
| UTF-8 | Python | false | false | 1,263 | py | 30 | tasks.py | 21 | 0.619161 | 0.619161 | 0 | 38 | 32.236842 | 121 |
mam288/bioinformatics-VI | 2,362,232,051,104 | c48abd814f28fcda66783c3e48af09846fca7f6c | 0a7665e696a0fe75e6243fc9624860b0d03cc675 | /wk5_05_viterbi_learning_HMM.py | bc61292780103345be870499fd81d1224c0ab4ff | []
| no_license | https://github.com/mam288/bioinformatics-VI | e4bcbd10a99869362811447c14c93f2ea6827cf8 | bda033a75522002feed7d34c3cc446172b22a3ae | refs/heads/master | 2021-01-21T05:15:20.243488 | 2017-03-02T21:21:24 | 2017-03-02T21:21:24 | 83,160,983 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Solution to Implement Viterbi Learning
Finding Mutations in DNA and Proteins (Bioinformatics VI) on Coursera.org
Week 5,code challenge #5
https://stepik.org/lesson/Hidden-Markov-Models-Code-Challenges-(Week-2)-11632/step/10?course=Stepic-Interactive-Text-for-Week-5&unit=9009
'''
import networkx as nx
import numpy as np
def viterbi_learn(input_transition_matrix,input_emission_matrix,states,emission_letters,emission_string,iterations):
'''
Implement Viterbi learning for estimating the parameters of an HMM.
Parameters
--------
input_transition_matrix: Initial transition matrix for the HMM (string)
input_emission_matrix: Initial emission matrix for the HMM (string)
states: the HMM's states (list of strings)
emission_letters: HMM's alphabet Σ (list of strings)
emission_string: string x of symbols emitted by an HMM (string)
iterations: a number of iterations j (integer)
Returns
--------
None
Print output: Emission and transition matrices resulting from applying
Viterbi learning for j iterations.
'''
hidden_path = viterbi(input_transition_matrix,input_emission_matrix,states,emission_letters,emission_string) # find the hidden path for the givven matrices and emission string
new_emission_matrix,new_transition_matrix = estimate_HMM_parameters(emission_string,hidden_path,states,emission_letters) # use the path to calculate new emission and transition matrices
# for a given number of iterations use the new matrices to calculate a new hidden path and then use that path to calculate new matrices
for i in range(iterations):
hidden_path = viterbi(new_transition_matrix,new_emission_matrix,states,emission_letters,emission_string)
new_emission_matrix,new_transition_matrix = estimate_HMM_parameters(emission_string,hidden_path,states,emission_letters)
print_matrix(new_transition_matrix,states,states)
print ('--------')
print_matrix(new_emission_matrix,states,emission_letters)
def estimate_HMM_parameters(emission_string,hidden_path,states,emission_letters):
'''
Estimate the transition matrix and emission matrix given a hidden path and emission string
'''
def create_transition_matrix(emission_string,hidden_path,states):
'''
Creates a transition matrix using the hidden path,emission string and states.
Output: Transition matrix as a numpy array
'''
transition_matrix = np.zeros((len(states),len(states)))
# add up the number of times the hidden path transitions from each state to each other state
for i in range(len(hidden_path)-1):
state_1 = hidden_path[i]; state_2 = hidden_path[i+1] # get the state at i and i+1 in the hidden path
state_1_index = states.index(state_1); state_2_index = states.index(state_2) # get the index for each state
transition_matrix[state_1_index][state_2_index] += 1
# add pseudo counts and normalize the matrix
for i in range(len(states)):
row_sum = sum(transition_matrix[i])
if row_sum == 0:
transition_matrix[i] += 0.01 # add pseudo counts
transition_matrix[i] = transition_matrix[i]/row_sum
return transition_matrix
def create_emission_matrix(emission_string,hidden_path,states,emission_letters):
emission_matrix = np.zeros((len(states),len(emission_letters)))
# add up the number of times a letter is emitted for each state in the hidden path
for i in range(len(hidden_path)):
emission = emission_string[i]
emission_index = emission_letters.index(emission)
state = hidden_path[i]
state_index = states.index(state)
emission_matrix[state_index][emission_index] +=1
# add pseudo counts and normalize the matrix
for i in range(len(states)):
row_sum = sum(emission_matrix[i])
if row_sum == 0:
emission_matrix[i] += 0.01
emission_matrix[i] = emission_matrix[i]/row_sum
return emission_matrix
emission_matrix = create_emission_matrix(emission_string,hidden_path,states,emission_letters)
transition_matrix = create_transition_matrix(emission_string,hidden_path,states)
return emission_matrix,transition_matrix
def viterbi(transition_matrix_input,emission_matrix_input,states,emission_letters,emission_string):
def create_matrices(transition_matrix_input,emission_matrix_input,states,emission_letters):
'''
Convert the transition and emission matrices from strings to numpy arrays
'''
num_emission_letters = len(emission_letters)
num_states = len(states)
transition_matrix = np.zeros((num_states,num_states))
emission_matrix = np.zeros((num_states,num_emission_letters))
for i in range(1,num_states+1):
t_row = transition_matrix_input.split('\n')[i]
t_row = [float(x) for x in t_row.split()[1:]]
transition_matrix[i-1] = t_row
e_row = emission_matrix_input.split('\n')[i]
e_row = [float(x) for x in e_row.split()[1:]]
emission_matrix[i-1] = e_row
return emission_matrix,transition_matrix
def create_viterbi_graph(transition_matrix_input,emission_matrix_input,states,emission_letters,emission_string):
'''
Create the viterbi graph with weighted edges using the given transition and emission matrices and emission string.
'''
viterbi_graph = nx.DiGraph()
num_states = len(states)
if type(transition_matrix_input) == np.ndarray:
transition_matrix = transition_matrix_input
emission_matrix = emission_matrix_input
else:
# convert the matrices to numpy arrays if they are not already arrays.
emission_matrix,transition_matrix = create_matrices(transition_matrix_input,emission_matrix_input,states,emission_letters)
for i in range(len(emission_string)+1):
if i == len(emission_string):
index = 0
else: # find the index of the letter emitted at i
index = emission_letters.index(emission_string[i])
# calculate the 'weight' for each edge in the graph
for outgoing_index in range(len(states)):
outgoing_state = states[outgoing_index]
emission_weight = emission_matrix[outgoing_index][index]
if i == 0:
incoming_state = ''
transition_weight = 0.5
viterbi_graph.add_edge(str(i),str(i+1) + outgoing_state,{'weight': emission_weight*transition_weight})
else:
for incoming_index in range(num_states):
incoming_state = states[incoming_index]
if i == len(emission_string):
transition_weight = 1; emission_weight = 1; outgoing_state = ''
viterbi_graph.add_edge(str(i) + incoming_state,str(i+1),{'weight': emission_weight*transition_weight})
continue
transition_weight = transition_matrix[incoming_index][outgoing_index]
viterbi_graph.add_edge(str(i) + incoming_state,str(i+1) + outgoing_state,{'weight': emission_weight*transition_weight})
return viterbi_graph
def calculate_scores_viterbi(viterbi_graph):
'''
Go through the viterbi graph in topological order and score each node.
'''
viterbi_graph.node['0']['score'] = 1
sorted_nodes = nx.topological_sort(viterbi_graph)
backtrack = {}
for node in sorted_nodes[1:]:
predecessors = viterbi_graph.predecessors(node)
high_score = 0
backtrack_node = ''
# calculate the score from each predecessor to 'node' and set it as the high score if it is greater than the current high score
for predecessor in predecessors:
score = viterbi_graph.node[predecessor]['score']*viterbi_graph[predecessor][node]['weight']
if score > high_score:
high_score = score
backtrack_node = predecessor
viterbi_graph.node[node]['score'] = high_score
backtrack[node] = backtrack_node # record which predecessor was used to calculate the high score
return backtrack
def reconstruct_path(viterbi_graph,backtrack):
'''Reconstruct the hidden path from the scored viterbi_graph'''
last_node = str(len(emission_string) + 1)
path = ''
current_node = last_node
predecessor = backtrack[current_node]
while predecessor != '0':
path = predecessor[-1] + path
current_node = predecessor
predecessor = backtrack[current_node]
return path
viterbi_graph = create_viterbi_graph(transition_matrix_input,emission_matrix_input,states,emission_letters,emission_string)
backtrack= calculate_scores_viterbi(viterbi_graph)
path = reconstruct_path(viterbi_graph,backtrack)
return path
def print_matrix(matrix, rows, columns):
    '''Print *matrix* as a tab-separated table.

    The first output line is a leading tab followed by the column labels;
    each subsequent line is a row label followed by that row's values
    rounded to 3 decimal places.

    Arguments:

        matrix
            2D sequence of numbers, shaped len(rows) x len(columns).
        rows
            Row labels (strings).
        columns
            Column labels (strings).
    '''
    # Single-argument parenthesized print() behaves identically on
    # Python 2 and Python 3 (the original used the py2-only statement form).
    print('\t' + '\t'.join(columns))
    for r in range(len(rows)):
        cells = [str(round(matrix[r][c], 3)) for c in range(len(columns))]
        print(rows[r] + '\t' + '\t'.join(cells))
####################################################################################
if __name__ == "__main__":
    #sample_data
    # Two hidden states (A, B) emitting symbols x/y/z; run Viterbi learning
    # for 10 iterations on the sample emission string.  The matrices are
    # whitespace-delimited text blocks parsed by viterbi_learn.
    transition_matrix = '''A B
    A 0.599 0.401
    B 0.294 0.706'''
    emission_matrix = '''x y z
    A 0.424 0.367 0.209
    B 0.262 0.449 0.289'''
    iterations = 10
    emission_string = 'zyzxzxxxzz'
    emission_letters = 'x y z'.split()
    states = 'A B'.split()
    viterbi_learn(transition_matrix,emission_matrix,states,emission_letters,emission_string,iterations)
taddes/python-blockchain | 8,220,567,441,588 | 0c0688fcca7bb9c4f61bcefc33946efeed048fd4 | 032aee36fd2611f3c56956afc12f1b4503b04180 | /files.py | d5668391ba3ab405b03aa48775f12e57510e9acb | []
| no_license | https://github.com/taddes/python-blockchain | 57ba1d057709bc545062dfbcd26f39dd9234e863 | beaf1a44b9e71d2815d133fb1d02caef0aac3e7c | refs/heads/master | 2020-04-24T07:09:53.094143 | 2019-06-28T17:40:55 | 2019-06-28T17:40:55 | 171,789,131 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
f = open('demo.txt', mode='r')
file_content = f.readlines()
for line in file_content:
# [:-1] removes newline character
print(line[:-1])
f.close()
# line = f.readline()
# while line:
# print(line)
# line = f.readline()
"""
# Open demo.txt for writing (truncating any existing content); the context
# manager guarantees the file is closed even if write() raises.
with open('demo.txt', mode='w') as f:
    f.write('Testing this')
| UTF-8 | Python | false | false | 304 | py | 21 | files.py | 10 | 0.598684 | 0.592105 | 0 | 18 | 15.833333 | 37 |
tazjel/python-ifcfg | 3,246,995,319,213 | 9b950f5250c002731bcf21e510a1b9cfa86e72e4 | 561bd114738466162b8ef5dc3dc10ba4715cd648 | /ifcfg/parser.py | 1c71815a50bc56b14f94719b016a395e62519f39 | [
"BSD-3-Clause"
]
| permissive | https://github.com/tazjel/python-ifcfg | b79c316c876baa8c95e5df135ad4cdbf992315b4 | 009a27e8ad70e4aa85e48dd473296593a3c75330 | refs/heads/master | 2016-02-28T02:56:26.461085 | 2012-06-05T23:20:40 | 2012-06-05T23:20:40 | 6,483,091 | 3 | 3 | null | false | 2015-12-02T15:54:42 | 2012-11-01T01:03:11 | 2015-06-28T21:52:17 | 2012-06-06T20:08:33 | 171 | 3 | 8 | 2 | Python | null | null |
import re
import socket
from .meta import MetaMixin
from .tools import exec_cmd, hex2dotted, minimal_logger
Log = minimal_logger(__name__)
class IfcfgParser(MetaMixin):

    """
    Parse the output of ``ifconfig -a`` (BSD/OSX style by default) into a
    dictionary of interfaces keyed by device name.  Subclasses support
    other ifconfig dialects via ``Meta.override_patterns``.
    """

    class Meta:
        # Command whose stdout is parsed when no data is supplied.
        ifconfig_cmd_args = ['ifconfig', '-a']
        # Regexes applied to every output line; each named group becomes a
        # key in the matched interface's dict.  Raw strings keep the regex
        # text identical while avoiding invalid-escape warnings for '\s'
        # on Python 3.6+.  (A duplicate 'ether' pattern was removed.)
        patterns = [
            r'(?P<device>^[a-zA-Z0-9]+): flags=(?P<flags>.*) mtu (?P<mtu>.*)',
            r'.*(inet )(?P<inet>[^\s]*).*',
            r'.*(inet6 )(?P<inet6>[^\s]*).*',
            r'.*(broadcast )(?P<broadcast>[^\s]*).*',
            r'.*(netmask )(?P<netmask>[^\s]*).*',
            r'.*(ether )(?P<ether>[^\s]*).*',
            r'.*(prefixlen )(?P<prefixlen>[^\s]*).*',
            r'.*(scopeid )(?P<scopeid>[^\s]*).*',
        ]
        # Platform-specific subclasses add their extra patterns here.
        override_patterns = []

    def __init__(self, *args, **kw):
        super(IfcfgParser, self).__init__(*args, **kw)
        self._interfaces = {}
        # Pre-captured ifconfig output may be injected via the 'ifconfig'
        # keyword (useful for testing); otherwise parse() shells out.
        self.ifconfig_data = kw.get('ifconfig', None)
        self.parse(self.ifconfig_data)

    def _get_patterns(self):
        """Return the base patterns plus any subclass override patterns."""
        return self._meta.patterns + self._meta.override_patterns

    def parse(self, ifconfig=None):
        """
        Parse ifconfig output into self._interfaces.

        Optional Arguments:

            ifconfig
                The data (stdout) from the ifconfig command.  Default is to
                call self._meta.ifconfig_cmd_args for the stdout.

        """
        if not ifconfig:
            ifconfig, err, retcode = exec_cmd(self._meta.ifconfig_cmd_args)
        self.ifconfig_data = ifconfig
        cur = None
        all_keys = []
        for line in self.ifconfig_data.splitlines():
            for pattern in self._get_patterns():
                m = re.match(pattern, line)
                if m:
                    groupdict = m.groupdict()
                    # A 'device' group marks the first line of a new
                    # interface block; following matches belong to it.
                    if 'device' in groupdict:
                        cur = groupdict['device']
                        # 'in' test replaces the Python-2-only has_key()
                        if cur not in self._interfaces:
                            self._interfaces[cur] = {}
                    for key in groupdict:
                        if key not in all_keys:
                            all_keys.append(key)
                        self._interfaces[cur][key] = groupdict[key]

        # fix it up (subclass/instance post-processing hook)
        self._interfaces = self.alter(self._interfaces)

        # standardize: give every interface every seen key (None when the
        # pattern never matched it) and lower-case all string values
        for key in all_keys:
            for device, device_dict in self._interfaces.items():
                if key not in device_dict:
                    self._interfaces[device][key] = None
                if isinstance(device_dict[key], str):
                    self._interfaces[device][key] = device_dict[key].lower()

    def alter(self, interfaces):
        """
        Used to provide the ability to alter the interfaces dictionary before
        it is returned from self.parse().

        Required Arguments:

            interfaces
                The interfaces dictionary.

        Returns: interfaces dict

        """
        # resolve a hostname for every interface that has an inet address
        for device, device_dict in interfaces.items():
            if 'inet' in device_dict:
                try:
                    host = socket.gethostbyaddr(device_dict['inet'])[0]
                    interfaces[device]['hostname'] = host
                except socket.herror:
                    # reverse lookup failed; record the absence explicitly
                    interfaces[device]['hostname'] = None
        return interfaces

    @property
    def interfaces(self):
        """
        Returns the full interfaces dictionary.

        """
        return self._interfaces

    @property
    def default_interface(self):
        """
        Returns the interface dict of the default-route device, or None
        when no default route is found.

        """
        out, err, ret = exec_cmd(['/sbin/route', '-n'])
        lines = out.splitlines()
        for line in lines[2:]:  # first two lines of `route -n` are headers
            if line.split()[0] == '0.0.0.0':
                iface = line.split()[-1]
                for interface in self.interfaces:
                    if interface == iface:
                        return self.interfaces[interface]
        return None  # pragma: nocover
class UnixParser(IfcfgParser):
    """Generic Unix parser; behavior is identical to IfcfgParser.

    The redundant pass-through ``__init__`` (which only forwarded its
    arguments to ``super().__init__``) was removed; the inherited
    constructor is equivalent.
    """
class LinuxParser(UnixParser):
    """Linux ifconfig parser; inherits all behavior from UnixParser.

    The redundant pass-through ``__init__`` was removed; the inherited
    constructor is equivalent.
    """
class Linux2Parser(LinuxParser):
    """Parser for older Linux ``ifconfig`` output (the "Link encap:" format).

    The redundant pass-through ``__init__`` was removed; the inherited
    constructor is equivalent.
    """

    class Meta:
        # Raw strings keep the regex values byte-identical while avoiding
        # invalid-escape warnings ('\s', '\d', '\/') on Python 3.6+.
        override_patterns = [
            r'(?P<device>^[a-zA-Z0-9]+)(.*)Link encap:(.*).*',
            r'(.*)Link encap:(.*)(HWaddr )(?P<ether>[^\s]*).*',
            r'.*(inet addr:)(?P<inet>[^\s]*).*',
            r'.*(inet6 addr: )(?P<inet6>[^\s\/]*/(?P<prefixlen>[\d]*)).*',
            r'.*(Bcast:)(?P<broadcast>[^\s]*).*',
            r'.*(Mask:)(?P<netmask>[^\s]*).*',
            r'.*(Scope:)(?P<scopeid>[^\s]*).*',
        ]
class MacOSXParser(UnixParser):
    """
    Parser for Mac OS X ``ifconfig``: adds status/media fields and converts
    hex netmasks to dotted-quad notation.

    The redundant pass-through ``__init__`` was removed; the inherited
    constructor is equivalent.
    """

    class Meta:
        # Raw strings: value-identical, but no invalid-escape warnings.
        override_patterns = [
            r'.*(status: )(?P<status>[^\s]*).*',
            r'.*(media: )(?P<media>.*)',
        ]

    def parse(self, *args, **kw):
        """Parse as UnixParser does, then normalize netmask values."""
        super(MacOSXParser, self).parse(*args, **kw)

        # fix up netmask address for mac: ifconfig reports it in hex
        # (e.g. 0xffffff00); convert to dotted-quad for consistency
        for device, device_dict in self.interfaces.items():
            if device_dict['netmask'] is not None:
                netmask = self.interfaces[device]['netmask']
                self.interfaces[device]['netmask'] = hex2dotted(netmask)
| UTF-8 | Python | false | false | 6,050 | py | 10 | parser.py | 7 | 0.472397 | 0.469091 | 0 | 172 | 34.168605 | 77 |
CARMIN-org/CARMIN-server | 18,614,388,273,853 | 948d09a2ce0f319eae03ae76a74aa6683b1dc763 | 087b6497d26f78c73f529d1b6f2ff877871fe766 | /server/test/fakedata/users.py | 18630f0eaf32e041ad2a18f58a22a6049789758a | [
"MIT"
]
| permissive | https://github.com/CARMIN-org/CARMIN-server | 0b1efc305fb3957b1d700970d0a13ef53d2bb4ca | 49e56edf0c07e5f3dddc10bdc3e5d4d965e263d8 | refs/heads/master | 2021-04-03T01:52:39.846007 | 2018-05-01T13:22:07 | 2018-05-01T13:22:07 | 124,953,943 | 2 | 2 | MIT | false | 2018-05-01T01:32:09 | 2018-03-12T21:29:40 | 2018-04-23T18:19:03 | 2018-04-23T18:19:00 | 532 | 1 | 1 | 3 | Python | false | null | from server.database.models.user import User, Role
from werkzeug.security import generate_password_hash
def admin(encrypted=False):
    """Return an admin-role User fixture with API key "admin-api-key".

    When *encrypted* is True the stored password is the werkzeug hash of
    "admin"; otherwise it is the plain string "admin".
    """
    password = "admin"
    if encrypted:
        password = generate_password_hash(password)
    return User(
        username="admin",
        password=password,
        role=Role.admin,
        api_key="admin-api-key")
def standard_user(encrypted=False):
    """Return a standard-role User fixture with API key "standard-user-api-key".

    When *encrypted* is True the stored password is the werkzeug hash of
    "user"; otherwise it is the plain string "user".
    """
    password = "user"
    if encrypted:
        password = generate_password_hash(password)
    return User(
        username="user",
        password=password,
        role=Role.user,
        api_key="standard-user-api-key")
def standard_user_2(encrypted=False):
    """Return a second standard-role User fixture (username "user2").

    When *encrypted* is True the stored password is the werkzeug hash of
    "user2"; otherwise it is the plain string "user2".

    Fix: the unencrypted branch previously returned "user", which was
    inconsistent with the hashed branch (hash of "user2") -- the same call
    with and without encryption would represent different passwords.
    """
    return User(
        username="user2",
        password=generate_password_hash("user2") if encrypted else "user2",
        role=Role.user,
        api_key="standard-user-2-api-key")
| UTF-8 | Python | false | false | 755 | py | 86 | users.py | 81 | 0.656954 | 0.651656 | 0 | 26 | 28.038462 | 75 |
sungguenja/bsgg | 10,883,447,159,310 | 92f6b662e11971dfc6fa771a0a0335d19739b262 | 1c67732a24042a991cc9f7e764d4640522391972 | /back/gamedata/migrations/0002_animal_respon_time.py | a9fdbb64c49d905f345deaacfe7c4350a59f0b66 | []
| no_license | https://github.com/sungguenja/bsgg | 1061ccc6f5f08ed9ad14d3a332af020ec7a5df22 | 447283378ac3bb8f489e2a4662bfb6513bc37be2 | refs/heads/master | 2023-04-01T14:15:05.491775 | 2021-04-06T09:46:25 | 2021-04-06T09:46:25 | 318,800,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.4 on 2020-12-22 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an integer `respon_time` column
    # (default 0) to the gamedata `Animal` model.  Code left byte-identical
    # to the generated output; comments only.

    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('gamedata', '0001_initial'),
    ]
    operations = [
        # New column: animal.respon_time (IntegerField, default=0).
        migrations.AddField(
            model_name='animal',
            name='respon_time',
            field=models.IntegerField(default=0),
        ),
    ]
| UTF-8 | Python | false | false | 378 | py | 83 | 0002_animal_respon_time.py | 23 | 0.582011 | 0.529101 | 0 | 18 | 20 | 49 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.