Schema of the per-file records that follow (column, dtype, observed stats):

repo_name               stringlengths    7 – 111
__id__                  int64            16.6k – 19,705B
blob_id                 stringlengths    40 – 40
directory_id            stringlengths    40 – 40
path                    stringlengths    5 – 151
content_id              stringlengths    40 – 40
detected_licenses       sequence
license_type            stringclasses    2 values
repo_url                stringlengths    26 – 130
snapshot_id             stringlengths    40 – 40
revision_id             stringlengths    40 – 40
branch_name             stringlengths    4 – 42
visit_date              timestamp[ns]
revision_date           timestamp[ns]
committer_date          timestamp[ns]
github_id               int64            14.6k – 687M
star_events_count       int64            0 – 209k
fork_events_count       int64            0 – 110k
gha_license_id          stringclasses    12 values
gha_fork                bool             2 classes
gha_event_created_at    timestamp[ns]
gha_created_at          timestamp[ns]
gha_updated_at          timestamp[ns]
gha_pushed_at           timestamp[ns]
gha_size                int64            0 – 10.2M
gha_stargazers_count    int32            0 – 178k
gha_forks_count         int32            0 – 88.9k
gha_open_issues_count   int32            0 – 2.72k
gha_language            stringlengths    1 – 16
gha_archived            bool             1 class
gha_disabled            bool             1 class
content                 stringlengths    10 – 2.95M
src_encoding            stringclasses    5 values
language                stringclasses    1 value
is_vendor               bool             2 classes
is_generated            bool             2 classes
length_bytes            int64            10 – 2.95M
extension               stringclasses    19 values
num_repo_files          int64            1 – 202k
filename                stringlengths    4 – 112
num_lang_files          int64            1 – 202k
alphanum_fraction       float64          0.26 – 0.89
alpha_fraction          float64          0.2 – 0.89
hex_fraction            float64          0 – 0.09
num_lines               int32            1 – 93.6k
avg_line_length         float64          4.57 – 103
max_line_length         int64            7 – 931
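Each record below supplies these 47 fields in order, with the raw file body in the content field. A minimal sketch of how such rows could be consumed, assuming the dump comes from a Hugging Face-style dataset; the dataset path used here is a placeholder, not the real source.

from datasets import load_dataset  # dataset path below is hypothetical

ds = load_dataset("org/python-source-files", split="train", streaming=True)
for row in ds:
    # Each row pairs repo/commit metadata with the raw file body in `content`.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break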
txazo/txazodevelop
7,069,516,192,335
9c426c6732ce582c769c3c76b7ec757b97e74dc2
ac9b510bcc73d41646da53f7a12c3f15dab744bf
/python/test/4.py
4f22b113022c4d0043ff14130d372bc70101c3a9
[]
no_license
https://github.com/txazo/txazodevelop
5eb73e12f8b2f2de58808a3c594d5e1724fbdbdf
c07eebf832b34172b807b9124bd5a81a390b1b05
refs/heads/master
2020-03-25T22:44:25.477523
2018-08-10T05:16:06
2018-08-10T05:16:06
29,594,975
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

# tuple: immutable
s1 = (12, 12.5, True, "hello")
print s1, type(s1)
print s1[0]
print s1[-1]     # last element
print s1[-2]     # second-to-last element
print s1[:3]     # indices (0, 1, 2)
print s1[1:]     # from index 1 to the end
print s1[1:3]    # indices (1, 2)
print s1[0:4:2]  # indices (0, 2)
print s1[3:1:-1] # indices (3, 2)

# list: mutable
s2 = [12, 12.5, True, "hello"]
print s2, type(s2)
print s2[0]
s2[1] = False
print s2[1]

# string: immutable, indexes like a tuple
str = "abcdefg"
print str[2]
UTF-8
Python
false
false
449
py
803
4.py
640
0.568922
0.431078
0
24
15.625
30
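The file above is Python 2 (bare print statements). For reference, a minimal sketch of the same demonstration under Python 3, where print is a function; the behaviour is otherwise unchanged:

s1 = (12, 12.5, True, "hello")  # tuple: immutable
print(s1, type(s1))
print(s1[3:1:-1])               # reversed slice: ('hello', True)

s2 = [12, 12.5, True, "hello"]  # list: mutable
s2[1] = False
print(s2[1])                    # False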
gaoyan10/server-sdk-python
5,214,090,313,927
cdcae6704a4565480beed61d73236f679ab7620d
692b8244918908c1763c21c0764c7a73b862b24c
/rongcloud/api.py
739606631b25990d3cbde1beb08d2ad0eef11daa
[ "MIT" ]
permissive
https://github.com/gaoyan10/server-sdk-python
880017c754dcb09ce6509d2c89c6b0f0e3d037bb
0090b6cae47ee6fb0174a022a1185cedde957bd8
refs/heads/master
2020-12-11T04:00:28.431776
2014-12-26T10:07:09
2014-12-26T10:07:09
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/env python
# coding=utf-8

import os
import json
import logging
import random
import datetime
import hashlib
import platform

import requests

import util
import exceptions
from version import __version__


class ApiClient(object):

    api_host = "https://api.cn.rong.io"
    response_type = "json"

    library_details = "python %s" % platform.python_version()
    user_agent = "RongCloudSdk/RongCloud-Python-Sdk %s (%s)" % \
                 (library_details, __version__)

    ACTION_USER_TOKEN = "/user/getToken"
    ACTION_MESSAGE_PUBLISH = "/message/publish"
    ACTION_MESSAGE_SYSTEM_PUBLISH = "/message/system/publish"
    ACTION_MESSAGE_GROUP_PUBLISH = "/message/group/publish"
    ACTION_MESSAGE_CHATROOM_PUBLISH = "/message/chatroom/publish"
    ACTION_GROUP_SYNC = "/group/sync"
    ACTION_GROUP_CREATE = "/group/create"
    ACTION_GROUP_JOIN = "/group/join"
    ACTION_GROUP_QUIT = "/group/quit"
    ACTION_GROUP_DISMISS = "/group/dismiss"
    ACTION_CHATROOM_CREATE = "/chatroom/create"
    ACTION_CHATROOM_DESTROY = "/chatroom/destroy"
    ACTION_CHATROOM_QUERY = "/chatroom/query"

    def __init__(self, app_key=None, app_secret=None, verify=True):
        """API client.

        Usage::

            >>> from rongcloud.api import ApiClient
            >>> client = ApiClient('xxxxx', 'xxxx')

        You are advised to keep the app key and app secret in environment
        variables named rongcloud-app-key and rongcloud-app-secret, in
        which case the client can be created without arguments:

            >>> from rongcloud.api import ApiClient
            >>> client = ApiClient()

        :param app_key: App Key assigned by the developer platform.
        :param app_secret: App Secret assigned by the developer platform.
        :param verify: whether to verify the SSL certificate when sending requests.
        """
        self.app_key = app_key or os.environ.get('rongcloud-app-key')
        self.app_secret = app_secret or os.environ.get('rongcloud-app-secret')
        self.verify = verify

    def make_common_signature(self):
        """Generate the common request signature.

        Normally you do not need to call this method yourself.
        See http://docs.rongcloud.cn/server.html#_API_调用签名规则

        :return: {'app-key':'xxx','nonce':'xxx','timestamp':'xxx','signature':'xxx'}
        """
        nonce = str(random.random())
        timestamp = str(
            int(datetime.datetime.now().strftime("%s")) * 1000
        )
        signature = hashlib.sha1(
            self.app_secret + nonce + timestamp
        ).hexdigest()
        return {
            "rc-app-key": self.app_key,
            "rc-nonce": nonce,
            "rc-timestamp": timestamp,
            "rc-signature": signature
        }

    def headers(self):
        """Default HTTP headers."""
        return util.merge_dict(
            self.make_common_signature(),
            {
                "content-type": "application/x-www-form-urlencoded",
                "user-agent": self.user_agent
            }
        )

    def http_call(self, url, method, **kwargs):
        """Makes an http call. Logs response information."""
        logging.info("Request[%s]: %s" % (method, url))
        start_time = datetime.datetime.now()

        response = requests.request(method, url, verify=self.verify, **kwargs)

        duration = datetime.datetime.now() - start_time
        logging.info("Response[%d]: %s, Duration: %s.%ss."
                     % (response.status_code, response.reason,
                        duration.seconds, duration.microseconds))

        return self.handle_response(response, response.content.decode("utf-8"))

    def handle_response(self, response, content):
        """Validate HTTP response."""
        status = response.status_code
        if status in (301, 302, 303, 307):
            raise exceptions.Redirection(response, content)
        elif 200 <= status <= 299:
            return json.loads(content) if content else {}
        elif status == 400:
            raise exceptions.BadRequest(response, content)
        elif status == 401:
            raise exceptions.UnauthorizedAccess(response, content)
        elif status == 403:
            raise exceptions.ForbiddenAccess(response, content)
        elif status == 404:
            raise exceptions.ResourceNotFound(response, content)
        elif status == 405:
            raise exceptions.MethodNotAllowed(response, content)
        elif status == 409:
            raise exceptions.ResourceConflict(response, content)
        elif status == 410:
            raise exceptions.ResourceGone(response, content)
        elif status == 422:
            raise exceptions.ResourceInvalid(response, content)
        elif 401 <= status <= 499:
            raise exceptions.ClientError(response, content)
        elif 500 <= status <= 599:
            raise exceptions.ServerError(response, content)
        else:
            raise exceptions.ConnectionError(response, content,
                                             "Unknown response code: %d" % status)

    def post(self, action, params=None):
        """POST application-level parameters to an API endpoint.

        All http requests are handled here; the method applies the common
        signing rules and builds the API URL. If a new endpoint is released
        before the SDK is updated, you can call it through this method.

        Usage::

            >>> from rongcloud.api import ApiClient
            >>> client = ApiClient()
            >>> client.post('/user/getToken', {})

        :param action: endpoint path, e.g. /message/chatroom/publish
        :param params: application-level parameters, e.g.
            {"fromUserId":"xxxx", "content":"xxxxx"}
        :return: {"code":200, "userId":"jlk456j5", "token":"sfd9823ihufi"}
        """
        return self.http_call(
            url=util.join_url(self.api_host,
                              "%s.%s" % (action, self.response_type)),
            method="POST",
            data=params,
            headers=self.headers()
        )

    def user_get_token(self, user_id, name, portrait_uri):
        """Get a user token.

        http://docs.rongcloud.cn/server.html#_获取_Token_方法

        :param user_id:
        :param name:
        :param portrait_uri:
        :return: {"code":200, "userId":"jlk456j5", "token":"sfd9823ihufi"}
        """
        return self.post(
            action=self.ACTION_USER_TOKEN,
            params={
                "userId": user_id,
                "name": name,
                "portraitUri": portrait_uri
            }
        )

    def message_publish(self, from_user_id, to_user_id,
                        object_name, content,
                        push_content=None, push_data=None):
        """Send a one-to-one conversation message.

        http://docs.rongcloud.cn/server.html#_融云内置消息类型表
        http://docs.rongcloud.cn/server.html#_发送会话消息_方法

        :param from_user_id: sender's user Id.
        :param to_user_id: receiver's user Id; pass several of them to message
            several users.
        :param object_name: message type, currently one of
            ["RC:TxtMsg","RC:ImgMsg","RC:VcMsg","RC:LocMsg"].
        :param content: message payload; see the examples in the RongCloud
            built-in message type table. If objectName is a custom type the
            format is up to you. (required)
        :param push_content: for custom messages, the Push text to display. (optional)
        :param push_data: iOS only; extra payload attached to the Push
            notification in the appData field. (optional)
        :return: {"code":200}
        """
        return self.post(
            action=self.ACTIONIONS if False else self.ACTION_MESSAGE_PUBLISH,
            params={
                "fromUserId": from_user_id,
                "toUserId": to_user_id,
                "objectName": object_name,
                "content": content,
                "pushContent": push_content if push_content is not None else "",
                "pushData": push_data if push_data is not None else ""
            }
        )

    def message_system_publish(self, from_user_id, to_user_id,
                               object_name, content,
                               push_content=None, push_data=None):
        """Send a system message.

        http://docs.rongcloud.cn/server.html#_发送系统消息_方法

        :param from_user_id: sender's user Id.
        :param to_user_id: receiver's user Id; pass several of them to message
            several users.
        :param object_name: message type, currently one of
            ["RC:TxtMsg","RC:ImgMsg","RC:VcMsg","RC:LocMsg"].
        :param content: message payload; see the examples in the RongCloud
            built-in message type table. If objectName is a custom type the
            format is up to you. (required)
        :param push_content: for custom messages, the Push text to display. (optional)
        :param push_data: iOS only; extra payload attached to the Push
            notification in the appData field. (optional)
        :return: {"code":200}
        """
        return self.post(
            action=self.ACTION_MESSAGE_SYSTEM_PUBLISH,
            params={
                "fromUserId": from_user_id,
                "toUserId": to_user_id,
                "objectName": object_name,
                "content": content,
                "pushContent": push_content if push_content is not None else '',
                "pushData": push_data if push_data is not None else ''
            }
        )

    def message_group_publish(self, from_user_id, to_group_id,
                              object_name, content,
                              push_content=None, push_data=None):
        """Send a message to groups on behalf of a user.

        http://docs.rongcloud.cn/server.html#_发送群组消息_方法

        :param from_user_id: sender's user Id.
        :param to_group_id: receiving group Id; pass several of them to message
            several groups. (required)
        :param object_name: message type, currently one of
            ["RC:TxtMsg","RC:ImgMsg","RC:VcMsg","RC:LocMsg"].
        :param content: message payload; see the examples in the RongCloud
            built-in message type table. If objectName is a custom type the
            format is up to you. (required)
        :param push_content: for custom messages, the Push text to display. (optional)
        :param push_data: iOS only; extra payload attached to the Push
            notification in the appData field. (optional)
        :return: {"code":200}
        """
        return self.post(
            action=self.ACTION_MESSAGE_GROUP_PUBLISH,
            params={
                "fromUserId": from_user_id,
                "toGroupId": to_group_id,
                "objectName": object_name,
                "content": content,
                "pushContent": push_content if push_content is not None else '',
                "pushData": push_data if push_data is not None else ''
            }
        )

    def message_chatroom_publish(self, from_user_id,
                                 to_chatroom_id,
                                 object_name, content):
        """Send a message to chatrooms on behalf of a user.

        http://docs.rongcloud.cn/server.html#_发送聊天室消息_方法

        :param from_user_id: sender's user Id. (required)
        :param to_chatroom_id: receiving chatroom Id; pass several of them to
            message several chatrooms. (required)
        :param object_name: message type; see the message flags in the
            RongCloud message type table. Custom types are allowed. (required)
        :param content: message payload; see the examples in the RongCloud
            built-in message type table. If objectName is a custom type the
            format is up to you. (required)
        :return: {"code":200}
        """
        return self.post(
            action=self.ACTION_MESSAGE_CHATROOM_PUBLISH,
            params={
                "fromUserId": from_user_id,
                "toChatroomId": to_chatroom_id,
                "objectName": object_name,
                "content": content
            }
        )

    def group_sync(self, user_id, groups):
        """Synchronize the groups a user belongs to.

        Given RongCloud's current group architecture, you do not need to ask
        the RongCloud server to "create" groups, i.e. to tell it which groups
        contain which users. You only synchronize the current user's group
        memberships, which effectively subscribes to (or unsubscribes from)
        the messages of those groups. RongCloud computes each group's member
        list from the synchronized data and fans out messages accordingly.

        :param user_id: user Id.
        :param groups: mapping of groupId to groupName,
            e.g. {10001:'group1',10002:'group2'}
        :return: {"code":200}
        """
        group_mapping = {"group[%s]" % k: v for k, v in groups.items()}
        group_mapping.setdefault("userId", user_id)
        return self.post(action=self.ACTION_GROUP_SYNC, params=group_mapping)

    def group_create(self, user_id_list, group_id, group_name):
        """Create a group and add users to it; they will receive its messages.

        Note: this method is effectively an alias of the join-group method
        /group/join.

        http://docs.rongcloud.cn/server.html#_创建群组_方法

        :param user_id_list: user Ids to add; several may be passed:
            [userid1, userid2]
        :param group_id: Id of the group to join.
        :param group_name: name corresponding to that group Id.
        :return: {"code":200}
        """
        return self.post(action=self.ACTION_GROUP_CREATE, params={
            "userId": user_id_list,
            "groupId": group_id,
            "groupName": group_name
        })

    def group_join(self, user_id_list, group_id, group_name):
        """Add users to the given group; they will receive its messages.

        http://docs.rongcloud.cn/server.html#_加入群组_方法

        :param user_id_list: users to add: [userid1, userid2 ...]
        :param group_id: Id of the group to join.
        :param group_name: name corresponding to that group Id.
        :return: {"code":200}
        """
        return self.post(action=self.ACTION_GROUP_JOIN, params={
            "userId": user_id_list,
            "groupId": group_id,
            "groupName": group_name
        })

    def group_dismiss(self, user_id, group_id):
        """Dismiss the group; no user will receive its messages any more.

        http://docs.rongcloud.cn/server.html#_解散群组_方法

        :param user_id: Id of the user performing the dismissal.
        :param group_id: Id of the group to dismiss.
        :return: {"code":200}
        """
        return self.post(action=self.ACTION_GROUP_DISMISS, params={
            "userId": user_id,
            "groupId": group_id,
        })

    def chatroom_create(self, chatrooms):
        """Create chatrooms.

        http://docs.rongcloud.cn/server.html#_创建聊天室_方法

        :param chatrooms: {'r001':'room1'}; the key is the id of the chatroom
            to create and the value its name.
        :return: {"code":200}
        """
        chatroom_mapping = {'chatroom[%s]' % k: v for k, v in chatrooms.items()}
        return self.post(action=self.ACTION_CHATROOM_CREATE,
                         params=chatroom_mapping)

    def chatroom_destroy(self, chatroom_id_list=None):
        """Destroy chatrooms.

        Passing several chatroomId values destroys several chatrooms.

        http://docs.rongcloud.cn/server.html#_销毁聊天室_方法

        :param chatroom_id_list: Ids of the chatrooms to destroy.
        :return: {"code":200}
        """
        params = {
            "chatroomId": chatroom_id_list
        } if chatroom_id_list is not None else {}
        return self.post(action=self.ACTION_CHATROOM_DESTROY, params=params)

    def chatroom_query(self, chatroom_id_list=None):
        """Query chatroom information.

        http://docs.rongcloud.cn/server.html#_查询聊天室信息_方法

        :param chatroom_id_list: pass several Ids to query several chatrooms;
            if None, all chatrooms are queried.
        :return: {"code":200,"chatRooms":[{"chatroomId":"id1001","name":"name1","time":"2014-01-01 1:1:1"},{"chatroomId":"id1002","name":"name2","time":"2014-01-01 1:1:2"}]}
        """
        params = {
            "chatroomId": chatroom_id_list
        } if chatroom_id_list is not None else {}
        return self.post(action=self.ACTION_CHATROOM_QUERY, params=params)
UTF-8
Python
false
false
16,328
py
5
api.py
4
0.569677
0.557634
0
406
33.362069
172
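make_common_signature above implements RongCloud's documented signing rule: signature = SHA1(app_secret + nonce + timestamp). A standalone Python 3 sketch of the same rule, with a placeholder secret (the real one comes from the developer platform):

import hashlib
import random
import time

app_secret = "placeholder-secret"         # placeholder, not a real secret
nonce = str(random.random())
timestamp = str(int(time.time() * 1000))  # milliseconds since the epoch
signature = hashlib.sha1((app_secret + nonce + timestamp).encode("utf-8")).hexdigest()
print(nonce, timestamp, signature)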
xuanxin-L/Dissertation_Work
14,920,716,424,928
dd9e64a2bb0ab270d3b367ec7b19cb18ec27d999
27f2ee8c92b32a4ef2915148c4886f1da45774ee
/Chapter8/walking_data/gait_separation.py
0cb426b95c1cd7f988ec7834dc4d6d6f6e835b01
[]
no_license
https://github.com/xuanxin-L/Dissertation_Work
5589e3c4cfb363c22312e54375df85aeeb487d93
2cefebeeb04e48419c78d2a7753665c93456b65c
refs/heads/master
2022-06-14T14:34:44.522936
2020-05-03T21:58:13
2020-05-03T21:58:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 18:39:41 2018

@author: Huawei
"""

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

from hip_ankle_points_calculation import get_hip_point, get_ankle_point


def sep_gait_by_force(motion, grf, st_walking, ed_walking, num_nodes,
                      ext_nodes, ext_gait, delta, const_dicts,
                      plot_sign=False, write_sign=False, store_path=''):

    every_node = int(delta/0.01)

    b, a = signal.butter(2, 2.0*8/(50), 'low', analog=False)
    LFy = signal.filtfilt(b, a, grf[:, 1])
    RFy = signal.filtfilt(b, a, grf[:, 7])

    Lthigh = const_dicts['ThighLen']
    Lshank = const_dicts['ShankLen']

    signL = np.zeros(num_nodes)
    signR = np.zeros(num_nodes)

    for indL in range(len(LFy)):
        if LFy[indL] > 100:
            signL[indL] = 1

    for indR in range(len(RFy)):
        if RFy[indR] > 100:
            signR[indR] = 1

    DsignL = np.diff(signL)
    DsignR = np.diff(signR)

    Lhs = (np.where(DsignL == 1)[0]).astype(int)
    Lto = (np.where(DsignL == -1)[0]).astype(int)
    Rhs = (np.where(DsignR == 1)[0]).astype(int)
    Rto = (np.where(DsignR == -1)[0]).astype(int)

    Lgait = np.min([len(Lhs), len(Lto)])
    Rgait = np.min([len(Rhs), len(Rto)])

    Lswingx = np.zeros((Lgait, 100))
    Lswingy = np.zeros((Lgait, 100))
    Lswingt = np.zeros((Lgait, 100))
    Lstancex = np.zeros((Lgait, 100))
    Lstancey = np.zeros((Lgait, 100))
    Lstancet = np.zeros((Lgait, 100))

    Rswingx = np.zeros((Rgait, 100))
    Rswingy = np.zeros((Rgait, 100))
    Rswingt = np.zeros((Rgait, 100))
    Rstancex = np.zeros((Rgait, 100))
    Rstancey = np.zeros((Rgait, 100))
    Rstancet = np.zeros((Rgait, 100))

    InitialDataL = np.zeros((num_nodes, 9))
    InitialDataR = np.zeros((num_nodes, 9))

    if Lto[0] < Lhs[0]:
        for k in range(Lgait-1):
            for j in range(Lto[k], Lhs[k]):
                InitialDataL[j, 0] = 0
                InitialDataL[j, 1] = (j-Lto[k])*delta
                InitialDataL[j, 2] = (Lhs[k]-Lto[k])*delta
                InitialDataL[j, 3:5] = get_ankle_point(motion[Lto[k], 2], motion[Lto[k], 3], motion[Lto[k], 4], Lthigh, Lshank)
                InitialDataL[j, 5:7] = get_ankle_point(motion[j, 2], motion[j, 3], motion[j, 4], Lthigh, Lshank)
                InitialDataL[j, 7:9] = get_ankle_point(motion[Lhs[k], 2], motion[Lhs[k], 3], motion[Lhs[k], 4], Lthigh, Lshank)
            for i in range(Lhs[k], Lto[k+1]):
                InitialDataL[i, 0] = 1
                InitialDataL[i, 1] = (i-Lhs[k])*delta
                InitialDataL[i, 2] = (Lto[k+1]-Lhs[k])*delta
                InitialDataL[i, 3:5] = get_hip_point(motion[Lhs[k], 2], motion[Lhs[k], 3], motion[Lhs[k], 4], Lthigh, Lshank)
                InitialDataL[i, 5:7] = get_hip_point(motion[i, 2], motion[i, 3], motion[i, 4], Lthigh, Lshank)
                InitialDataL[i, 7:9] = get_hip_point(motion[Lto[k+1], 2], motion[Lto[k+1], 3], motion[Lto[k+1], 4], Lthigh, Lshank)

            time_sw = np.linspace(0, (Lhs[k]-Lto[k])*delta, Lhs[k]-Lto[k])
            time_st = np.linspace(0, (Lto[k+1]-Lhs[k])*delta, Lto[k+1]-Lhs[k])
            time_swn = np.linspace(0, (Lhs[k]-Lto[k])*delta, 100)
            time_stn = np.linspace(0, (Lto[k+1]-Lhs[k])*delta, 100)

            Lswingx[k, :] = np.interp(time_swn, time_sw, InitialDataL[Lto[k]:Lhs[k], 5])
            Lswingy[k, :] = np.interp(time_swn, time_sw, InitialDataL[Lto[k]:Lhs[k], 6])
            Lswingt[k, :] = time_swn
            Lstancex[k, :] = np.interp(time_stn, time_st, InitialDataL[Lhs[k]:Lto[k+1], 5])
            Lstancey[k, :] = np.interp(time_stn, time_st, InitialDataL[Lhs[k]:Lto[k+1], 6])
            Lstancet[k, :] = time_stn
    else:
        for k in range(Lgait-1):
            for i in range(Lhs[k], Lto[k]):
                InitialDataL[i, 0] = 1
                InitialDataL[i, 1] = (i-Lhs[k])*delta
                InitialDataL[i, 2] = (Lto[k]-Lhs[k])*delta
                InitialDataL[i, 3:5] = get_hip_point(motion[Lhs[k], 2], motion[Lhs[k], 3], motion[Lhs[k], 4], Lthigh, Lshank)
                InitialDataL[i, 5:7] = get_hip_point(motion[i, 2], motion[i, 3], motion[i, 4], Lthigh, Lshank)
                InitialDataL[i, 7:9] = get_hip_point(motion[Lto[k], 2], motion[Lto[k], 3], motion[Lto[k], 4], Lthigh, Lshank)
            for j in range(Lto[k], Lhs[k+1]):
                InitialDataL[j, 0] = 0
                InitialDataL[j, 1] = (j-Lto[k])*delta
                InitialDataL[j, 2] = (Lhs[k+1]-Lto[k])*delta
                InitialDataL[j, 3:5] = get_ankle_point(motion[Lto[k], 2], motion[Lto[k], 3], motion[Lto[k], 4], Lthigh, Lshank)
                InitialDataL[j, 5:7] = get_ankle_point(motion[j, 2], motion[j, 3], motion[j, 4], Lthigh, Lshank)
                InitialDataL[j, 7:9] = get_ankle_point(motion[Lhs[k+1], 2], motion[Lhs[k+1], 3], motion[Lhs[k+1], 4], Lthigh, Lshank)

            time_sw = np.linspace(0, (Lhs[k+1]-Lto[k])*delta, Lhs[k+1]-Lto[k])
            time_st = np.linspace(0, (Lto[k]-Lhs[k])*delta, Lto[k]-Lhs[k])
            time_swn = np.linspace(0, (Lhs[k+1]-Lto[k])*delta, 100)
            time_stn = np.linspace(0, (Lto[k]-Lhs[k])*delta, 100)

            Lswingx[k, :] = np.interp(time_swn, time_sw, InitialDataL[Lto[k]:Lhs[k+1], 5])
            Lswingy[k, :] = np.interp(time_swn, time_sw, InitialDataL[Lto[k]:Lhs[k+1], 6])
            Lswingt[k, :] = time_swn
            Lstancex[k, :] = np.interp(time_stn, time_st, InitialDataL[Lhs[k]:Lto[k], 5])
            Lstancey[k, :] = np.interp(time_stn, time_st, InitialDataL[Lhs[k]:Lto[k], 6])
            Lstancet[k, :] = time_stn

    if Rto[0] < Rhs[0]:
        for p in range(Rgait-1):
            for j in range(Rto[p], Rhs[p]):
                InitialDataR[j, 0] = 0
                InitialDataR[j, 1] = (j-Rto[p])*delta
                InitialDataR[j, 2] = (Rhs[p]-Rto[p])*delta
                InitialDataR[j, 3:5] = get_ankle_point(motion[Rto[p], 2], motion[Rto[p], 6], motion[Rto[p], 7], Lthigh, Lshank)
                InitialDataR[j, 5:7] = get_ankle_point(motion[j, 2], motion[j, 6], motion[j, 7], Lthigh, Lshank)
                InitialDataR[j, 7:9] = get_ankle_point(motion[Rhs[p], 2], motion[Rhs[p], 6], motion[Rhs[p], 7], Lthigh, Lshank)
            for i in range(Rhs[p], Rto[p+1]):
                InitialDataR[i, 0] = 1
                InitialDataR[i, 1] = (i-Rhs[p])*delta
                InitialDataR[i, 2] = (Rto[p+1]-Rhs[p])*delta
                InitialDataR[i, 3:5] = get_hip_point(motion[Rhs[p], 2], motion[Rhs[p], 6], motion[Rhs[p], 7], Lthigh, Lshank)
                InitialDataR[i, 5:7] = get_hip_point(motion[i, 2], motion[i, 6], motion[i, 7], Lthigh, Lshank)
                InitialDataR[i, 7:9] = get_hip_point(motion[Rto[p+1], 2], motion[Rto[p+1], 6], motion[Rto[p+1], 7], Lthigh, Lshank)

            time_sw = np.linspace(0, (Rhs[p]-Rto[p])*delta, Rhs[p]-Rto[p])
            time_st = np.linspace(0, (Rto[p+1]-Rhs[p])*delta, Rto[p+1]-Rhs[p])
            time_swn = np.linspace(0, (Rhs[p]-Rto[p])*delta, 100)
            time_stn = np.linspace(0, (Rto[p+1]-Rhs[p])*delta, 100)

            Rswingx[p, :] = np.interp(time_swn, time_sw, InitialDataR[Rto[p]:Rhs[p], 5])
            Rswingy[p, :] = np.interp(time_swn, time_sw, InitialDataR[Rto[p]:Rhs[p], 6])
            Rswingt[p, :] = time_swn
            Rstancex[p, :] = np.interp(time_stn, time_st, InitialDataR[Rhs[p]:Rto[p+1], 5])
            Rstancey[p, :] = np.interp(time_stn, time_st, InitialDataR[Rhs[p]:Rto[p+1], 6])
            Rstancet[p, :] = time_stn
    else:
        for p in range(Rgait-1):
            for i in range(Rhs[p], Rto[p]):
                InitialDataR[i, 0] = 1
                InitialDataR[i, 1] = (i-Rhs[p])*delta
                InitialDataR[i, 2] = (Rto[p]-Rhs[p])*delta
                InitialDataR[i, 3:5] = get_hip_point(motion[Rhs[p], 2], motion[Rhs[p], 6], motion[Rhs[p], 7], Lthigh, Lshank)
                InitialDataR[i, 5:7] = get_hip_point(motion[i, 2], motion[i, 6], motion[i, 7], Lthigh, Lshank)
                InitialDataR[i, 7:9] = get_hip_point(motion[Rto[p], 2], motion[Rto[p], 6], motion[Rto[p], 7], Lthigh, Lshank)
            for j in range(Rto[p], Rhs[p+1]):
                InitialDataR[j, 0] = 0
                InitialDataR[j, 1] = (j-Rto[p])*delta
                InitialDataR[j, 2] = (Rhs[p+1]-Rto[p])*delta
                InitialDataR[j, 3:5] = get_ankle_point(motion[Rto[p], 2], motion[Rto[p], 6], motion[Rto[p], 7], Lthigh, Lshank)
                InitialDataR[j, 5:7] = get_ankle_point(motion[j, 2], motion[j, 6], motion[j, 7], Lthigh, Lshank)
                InitialDataR[j, 7:9] = get_ankle_point(motion[Rhs[p+1], 2], motion[Rhs[p+1], 6], motion[Rhs[p+1], 7], Lthigh, Lshank)

            time_sw = np.linspace(0, (Rhs[p+1]-Rto[p])*delta, Rhs[p+1]-Rto[p])
            time_st = np.linspace(0, (Rto[p]-Rhs[p])*delta, Rto[p]-Rhs[p])
            time_swn = np.linspace(0, (Rhs[p+1]-Rto[p])*delta, 100)
            time_stn = np.linspace(0, (Rto[p]-Rhs[p])*delta, 100)

            Rswingx[p, :] = np.interp(time_swn, time_sw, InitialDataR[Rto[p]:Rhs[p+1], 5])
            Rswingy[p, :] = np.interp(time_swn, time_sw, InitialDataR[Rto[p]:Rhs[p+1], 6])
            Rswingt[p, :] = time_swn
            Rstancex[p, :] = np.interp(time_stn, time_st, InitialDataR[Rhs[p]:Rto[p], 5])
            Rstancey[p, :] = np.interp(time_stn, time_st, InitialDataR[Rhs[p]:Rto[p], 6])
            Rstancet[p, :] = time_stn

    if write_sign == True:
        with open(store_path+'InitialDataL_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(0, num_nodes-2*ext_nodes):
                for g in range(9):
                    StringP += str(InitialDataL[r + ext_nodes, g])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'InitialDataR_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(0, num_nodes-2*ext_nodes):
                for g in range(9):
                    StringP += str(InitialDataR[r + ext_nodes, g])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        Lhs_s = Lhs[(Lhs >= ext_nodes) & (Lhs <= num_nodes-ext_nodes)]
        Lto_s = Lto[(Lto >= ext_nodes) & (Lto <= num_nodes-ext_nodes)]

        if Lhs_s[0] < Lto_s[0]:
            with open(store_path+'Lhs_Lto_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
                StringP = ''
                for r in range(np.min([len(Lhs_s), len(Lto_s)])):
                    StringP += str(Lhs_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += str(Lto_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += '\n'
                outfile.write(StringP)
        else:
            with open(store_path+'Lto_Lhs_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
                StringP = ''
                for r in range(np.min([len(Lhs_s), len(Lto_s)])):
                    StringP += str(Lto_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += str(Lhs_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += '\n'
                outfile.write(StringP)

        Rhs_s = Rhs[(Rhs >= ext_nodes) & (Rhs <= num_nodes-ext_nodes)]
        Rto_s = Rto[(Rto >= ext_nodes) & (Rto <= num_nodes-ext_nodes)]

        if Rhs_s[0] < Rto_s[0]:
            with open(store_path+'Rhs_Rto_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
                StringP = ''
                for r in range(np.min([len(Rhs_s), len(Rto_s)])):
                    StringP += str(Rhs_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += str(Rto_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += '\n'
                outfile.write(StringP)
        else:
            with open(store_path+'Rto_Rhs_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
                StringP = ''
                for r in range(np.min([len(Rhs_s), len(Rto_s)])):
                    StringP += str(Rto_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += str(Rhs_s[r]-ext_nodes)
                    StringP += ' '
                    StringP += '\n'
                outfile.write(StringP)

        with open(store_path+'Lswingx_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lswingx[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Lswingy_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lswingy[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Lswingt_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lswingt[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Lstancex_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lstancex[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Lstancey_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lstancey[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Lstancet_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Lgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Lstancet[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rswingx_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rswingx[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rswingy_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rswingy[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rswingt_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rswingt[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rstancex_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rstancex[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rstancey_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rstancey[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

        with open(store_path+'Rstancet_'+str(st_walking)+'_'+str(ed_walking)+'_'+str(int(100/every_node))+'HZ.txt', 'w') as outfile:
            StringP = ''
            for r in range(Rgait-2*ext_gait):
                for q in range(100):
                    StringP += str(Rstancet[r+ext_gait, q])
                    StringP += ' '
                StringP += '\n'
            outfile.write(StringP)

    if plot_sign == True:
        Gait_info = np.zeros(6)
        Gait_info_std = np.zeros(6)

        Gait_info[0] = np.mean(np.diff(Lhs))*delta
        Gait_info_std[0] = np.std(np.diff(Lhs))*delta
        Gait_info[1] = np.mean(Lto[2:-2] - Lhs[1:-3])*delta
        Gait_info_std[1] = np.std(Lto[2:-2] - Lhs[1:-3])*delta
        Gait_info[2] = np.mean(Lhs[2:-2] - Lto[2:-2])*delta
        Gait_info_std[2] = np.std(Lhs[2:-2] - Lto[2:-2])*delta

        Gait_info[3] = np.mean(np.diff(Rhs))*delta
        Gait_info_std[3] = np.std(np.diff(Rhs))*delta
        Gait_info[4] = np.mean(Rto[1:] - Rhs[:-1])*delta
        Gait_info_std[4] = np.std(Rto[1:] - Rhs[:-1])*delta
        Gait_info[5] = np.mean(Rhs - Rto)*delta
        Gait_info_std[5] = np.std(Rhs - Rto)*delta

        num_par = 3
        index = np.arange(num_par)

        fig2 = plt.figure(figsize=(8, 6))
        ax = fig2.add_subplot(1, 1, 1)
        width = 0.3
        p1 = ax.bar(index, Gait_info[:3], width, color='r', bottom=0, yerr=Gait_info_std[:3])
        p2 = ax.bar(index+width, Gait_info[3:6], width, color='y', bottom=0, yerr=Gait_info_std[3:6])
        ax.set_title('Gait Period Info (s)', fontsize=14)
        ax.set_xticks(index + width / 2)
        ax.set_xticklabels(('Full Gait', 'Stance', 'Swing'), fontsize=14)
        ax.legend((p1[0], p2[0]), ('Left', 'Right'), fontsize=14)
        plt.show()

        Lstance_ind = np.where(InitialDataL[:, 0] == 1)[0]
        Lswing_ind = np.where(InitialDataL[:, 0] == 0)[0]
        Rstance_ind = np.where(InitialDataR[:, 0] == 1)[0]
        Rswing_ind = np.where(InitialDataR[:, 0] == 0)[0]

        fig4 = plt.figure(figsize=(14, 8))
        ax1 = fig4.add_subplot(2, 2, 1)
        plt.ylabel('Pelvis x (m)', fontsize=14)
        ax2 = fig4.add_subplot(2, 2, 2)
        plt.ylabel('Pelvis y (m)', fontsize=14)
        ax3 = fig4.add_subplot(2, 2, 3)
        plt.ylabel('Ankle x (m)', fontsize=14)
        plt.xlabel('Gait period (s)', fontsize=14)
        ax4 = fig4.add_subplot(2, 2, 4)
        plt.ylabel('Ankle y (m)', fontsize=14)
        plt.xlabel('Gait period (s)', fontsize=14)

        ax1.plot(InitialDataL[Lstance_ind, 1], InitialDataL[Lstance_ind, 5], '.', label='Left Leg')
        ax1.plot(InitialDataR[Rstance_ind, 1], InitialDataR[Rstance_ind, 5], '.', label='Right Leg')
        ax2.plot(InitialDataL[Lstance_ind, 1], InitialDataL[Lstance_ind, 6], '.', label='Left Leg')
        ax2.plot(InitialDataR[Rstance_ind, 1], InitialDataR[Rstance_ind, 6], '.', label='Right Leg')
        ax3.plot(InitialDataL[Lswing_ind, 1], InitialDataL[Lswing_ind, 5], '.', label='Left Leg')
        ax3.plot(InitialDataR[Rswing_ind, 1], InitialDataR[Rswing_ind, 5], '.', label='Right Leg')
        ax4.plot(InitialDataL[Lswing_ind, 1], InitialDataL[Lswing_ind, 6], '.', label='Left Leg')
        ax4.plot(InitialDataR[Rswing_ind, 1], InitialDataR[Rswing_ind, 6], '.', label='Right Leg')
        plt.legend(fontsize=14)
        plt.show()
UTF-8
Python
false
false
21,839
py
998
gait_separation.py
58
0.475434
0.448647
0
463
46.170626
133
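The core event-detection idea in sep_gait_by_force above: low-pass filter the vertical ground-reaction force, threshold it at 100 N, and read heel strikes and toe-offs off the 0-to-1 and 1-to-0 edges of the resulting contact signal via np.diff. A toy sketch with synthetic numbers:

import numpy as np

Fy = np.array([0, 40, 150, 210, 180, 120, 60, 0, 0, 130, 220, 90, 0])  # synthetic force trace
contact = (Fy > 100).astype(int)         # 1 while the foot loads the plate
edges = np.diff(contact)
heel_strikes = np.where(edges == 1)[0]   # sample just before contact begins
toe_offs = np.where(edges == -1)[0]      # sample just before contact ends
print(heel_strikes, toe_offs)            # [1 8] [5 10]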
smunix/brickv
1,108,101,577,842
3cadc54ec3fe01e9174eefaf6b89117aa0e4cb45
076015f6a45d65818d5c700a8896ed6df780a47a
/src/brickv/build_pkg.py
115cd2aa8e9ae78c4218cfa7bf2a5c80d932e875
[]
no_license
https://github.com/smunix/brickv
0e0c6e995c50c467b63ec4131ea9904b3e6d8f83
2d78a242bc25131e5c5122245e43caa33e1fbb0b
refs/heads/master
2021-01-16T18:53:50.629090
2011-12-09T12:23:26
2011-12-09T12:23:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

"""
brickv (Brick Viewer)
Copyright (C) 2011 Olaf Lüke <olaf@tinkerforge.com>
              2011 Bastian Nordmeyer <bastian@tinkerforge.com>

build_pkg.py: Package builder for Brick Viewer

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""

# Windows:
#   dependencies:
#       pythonxy (2.6)
#       py2exe
#       nsis
#       win redistributables vcredist under winxp
#
#   run build scripts in all folders
#   run python build_pkg.py win to build the windows exe
#   final data is stored in folder "dist"
#
#   script copies OpenGL, special libs and plugin_system
#   in dist folder

import config
import sys
from distutils.core import setup
import os
import glob
import shutil
import matplotlib

DESCRIPTION = 'Brick Viewer'
NAME = 'Brickv'


def build_windows_pkg():
    import py2exe
    # os.system("python build_all_ui.py")
    # data_files = matplotlib.get_py2exe_datafiles()
    data_files = []

    def visitor(arg, dirname, names):
        for n in names:
            if os.path.isfile(os.path.join(dirname, n)):
                if arg[0] == 'y':  # replace first folder name
                    data_files.append((os.path.join(dirname.replace(arg[1], "")), [os.path.join(dirname, n)]))
                else:  # keep full path
                    data_files.append((os.path.join(dirname), [os.path.join(dirname, n)]))

    os.path.walk(os.path.normcase("../build_data/Windows/"), visitor, ('y', os.path.normcase("../build_data/Windows/")))
    os.path.walk("plugin_system", visitor, ('n', "plugin_system"))

    data_files.append((os.path.join('.'), [os.path.join('.', 'brickv-icon.png')]))

    setup(
        name=NAME,
        description=DESCRIPTION,
        version=config.BRICKV_VERSION,
        data_files=data_files,
        options={
            "py2exe": {
                "dll_excludes": ["MSVCP90.dll"],
                "includes": ["PyQt4.QtSvg", "sip", "PyQt4.Qwt5", "PyQt4.QtCore", "PyQt4.QtGui",
                             "numpy.core.multiarray", "PyQt4.QtOpenGL", "OpenGL.GL", "ctypes.util",
                             "plot_widget", "pylab", "matplotlib.backends.backend_qt4agg",
                             "scipy.interpolate"],
                "excludes": ["_gtkagg", "_tkagg"]
            }
        },
        zipfile=None,
        windows=[{'script': 'main.py',
                  'icon_resources': [(0, os.path.normcase("../build_data/Windows/brickv-icon.ico"))]}]
    )

    # build nsis
    run = "\"" + os.path.join("C:\Program Files\NSIS\makensis.exe") + "\""
    data = " dist\\nsis\\brickv_installer_windows.nsi"
    print "run:", run
    print "data:", data
    os.system(run + data)


def build_linux_pkg():
    import shutil

    src_path = os.getcwd()
    build_dir = 'build_data/linux/brickv/usr/share/brickv'
    dest_path = os.path.join(os.path.split(src_path)[0], build_dir)
    if os.path.isdir(dest_path):
        shutil.rmtree(dest_path)

    shutil.copytree(src_path, dest_path)

    build_data_path = os.path.join(os.path.split(src_path)[0], 'build_data/linux')
    os.chdir(build_data_path)
    os.system('dpkg -b brickv/ brickv-' + config.BRICKV_VERSION + '_all.deb')


if __name__ == "__main__":
    if sys.argv[1] == "win":
        sys.argv[1] = "py2exe"  # rewrite sys.argv[1] for setup(), want to call py2exe
        build_windows_pkg()
    if sys.argv[1] == "linux":
        build_linux_pkg()
UTF-8
Python
false
false
4,135
py
35
build_pkg.py
29
0.601838
0.589744
0
115
33.947826
254
nkapetanas/Deep-Learning-Image-Classifier
5,093,831,243,365
8ce9e1f9a8f4ac6602ab8eaf9b4a2d6be17df00c
0dc2674d53fa893170079e79795bd1ab711a7076
/cnnSVHN.py
0e923273219f5adcd29b8cdc7c80aceb7067d03e
[]
no_license
https://github.com/nkapetanas/Deep-Learning-Image-Classifier
0dcb509d1d416cac913cad1f9b56a3ad959c6b26
5d0cf41625170c9fa799b32fbca0e860ada2d197
refs/heads/master
2022-04-07T15:31:25.656205
2020-01-27T20:40:43
2020-01-27T20:40:43
231,837,294
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os

import numpy as np
from scipy.io import loadmat
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold

np.random.seed(1400)

import itertools

from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
from six.moves import urllib
import matplotlib.pyplot as plt

URL_TRAIN_PATH = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
URL_TEST_PATH = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'

DOWNLOADED_FILENAME_TRAIN = 'housenumbers_training.mat'
DOWNLOADED_FILENAME_TEST = 'housenumbers_test.mat'

HEIGHT = 32
WIDTH = 32
CHANNELS = 3  # since these are rgb images
N_INPUTS = HEIGHT * WIDTH
N_OUTPUTS = 11


def download_data():
    if not os.path.exists(DOWNLOADED_FILENAME_TRAIN):
        filename, _ = urllib.request.urlretrieve(URL_TRAIN_PATH, DOWNLOADED_FILENAME_TRAIN)
        print('Found and verified file from this path: ', URL_TRAIN_PATH)
        print('Download file: ', DOWNLOADED_FILENAME_TRAIN)

    if not os.path.exists(DOWNLOADED_FILENAME_TEST):
        filename, _ = urllib.request.urlretrieve(URL_TEST_PATH, DOWNLOADED_FILENAME_TEST)
        print('Found and verified file from this path: ', URL_TEST_PATH)
        print('Download file: ', DOWNLOADED_FILENAME_TEST)


def get_kfold(x_train, y_train, k):
    folds = list(StratifiedKFold(n_splits=k, shuffle=True, random_state=1).split(x_train, y_train))
    return folds, x_train, y_train


def get_model(x_train):
    model = Sequential()
    model.add(Conv2D(9, (3, 3), padding='same', activation='relu', input_shape=x_train.shape[1:]))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Conv2D(36, (3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Conv2D(49, (3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(11, activation="softmax"))
    return model


def plot_confusion_matrix(confusion_matrix, target_names, normalize=False, title='Confusion matrix'):
    plt.figure(figsize=(8, 6))
    plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.get_cmap('Blues'))
    plt.title(title)

    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)

    thresh = confusion_matrix.max() / 1.5 if normalize else confusion_matrix.max() / 2

    for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if confusion_matrix[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if confusion_matrix[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()


def get_random_index_of_images():
    indexes_for_ran_chosen_image_each_class = dict()
    indexes_for_ran_chosen_image_each_class[1] = 9
    indexes_for_ran_chosen_image_each_class[2] = 2
    indexes_for_ran_chosen_image_each_class[3] = 3
    indexes_for_ran_chosen_image_each_class[4] = 15
    indexes_for_ran_chosen_image_each_class[5] = 5
    indexes_for_ran_chosen_image_each_class[6] = 21
    indexes_for_ran_chosen_image_each_class[7] = 14
    indexes_for_ran_chosen_image_each_class[8] = 13
    indexes_for_ran_chosen_image_each_class[9] = 1
    return indexes_for_ran_chosen_image_each_class


download_data()

# squeeze_me=True -> unit 1x1 matrix dimensions are squeezed to be scalars
train_data_mat = loadmat(DOWNLOADED_FILENAME_TRAIN, squeeze_me=True)
test_data_mat = loadmat(DOWNLOADED_FILENAME_TEST, squeeze_me=True)

x_train = train_data_mat['X']
y_train = train_data_mat['y']
x_test = test_data_mat['X']
y_test = test_data_mat['y']

# num_images, height, width, num_channels
x_train = np.transpose(x_train, (3, 0, 1, 2))
x_test = np.transpose(x_test, (3, 0, 1, 2))

x_train_validation_data = x_train[:7326]
y_train_validation_data = y_train[:7326]

x_train = x_train[7326:]
y_train = y_train[7326:]

es = EarlyStopping(monitor='val_loss', mode='auto', verbose=1, patience=2)
checkpoint = ModelCheckpoint('/logs/logs.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)

model = get_model(x_train)
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10,
          validation_data=(x_train_validation_data, y_train_validation_data),
          callbacks=[es])

predicted_values = model.predict_classes(x_test)
matrix = metrics.confusion_matrix(y_test, predicted_values)

print(metrics.accuracy_score(y_test, predicted_values))
print(metrics.f1_score(y_test, predicted_values, average='micro'))
print(metrics.recall_score(y_test, predicted_values, average='micro'))
print(metrics.precision_score(y_test, predicted_values, average='micro'))

target_names = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
plot_confusion_matrix(matrix, target_names)
UTF-8
Python
false
false
5,559
py
2
cnnSVHN.py
1
0.67692
0.653535
0
150
36.06
106
anstepp/junkyardTemplesSolanum
2,113,123,911,496
19f5f755f8baea09d876af9390f6c2c88cdf168e
8c3bf4b4800ab72666f9add6b66ca628eb319e5d
/testGenePlay.py
07411673f9d35fcef4f06b8a0db2b43f1f6289eb
[]
no_license
https://github.com/anstepp/junkyardTemplesSolanum
0b0dee326d33ce70dc2e0961dda56485f383f032
af4bd0543a633680ae42e10d2eaa6e74fba02cd2
refs/heads/master
2020-05-17T11:17:43.642121
2017-05-08T14:58:35
2017-05-08T14:58:35
38,716,606
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from rtcmix import *
import utils, words
import random, sys

rtsetparams(44100, 2)
load("SPECTACLE2")

start = 0

#chosen = random.choice(words.words)
#file = chosen[1]

for thing in words.words:
    rtinput(thing[1])
    amp = 10
    env = maketable("curve", 1000, 0,0,-2, 10,1,2, 1000,0)
    dur = DUR()
    fftsize = 16384
    windowsize = fftsize * 2
    winTab = 0
    overlap = 2
    eqTab = maketable("curve", "nonorm", fftsize, 0,-90,0, 151,-90,2, 152,0,0, 153,0,2, 154,-90,0, 160,-90,2, 161,0,1, 162,0,2, 163,-90,0, 179,-90,2, 180,0,1, 181,0,2, 182,-90,0, 243,-90,2, 244,0,2, 245,-90,0, fftsize,-90)
    delayTab = maketable("random", fftsize, 0, 0, 10)
    fbTab = .99
    pan = random.random()
    ringdown = 50
    SPECTACLE2(0, 0, dur, amp * env, 1, dur * ringdown, fftsize, windowsize, winTab, overlap, eqTab, delayTab, fbTab, 0, 22050, 0, 22050, 0, 1, 0, pan)
    start += ringdown
    print thing
UTF-8
Python
false
false
889
py
19
testGenePlay.py
18
0.626547
0.455568
0
38
22.421053
91
skdonepudi/100DaysOfCode
5,299,989,666,111
cdc3b695a5a8f843965a83562593f438aa6e63b0
9818262abff066b528a4c24333f40bdbe0ae9e21
/Day 16/SetNumbers.py
b0da7bb7d42efc289f22494e47d87c54e7658ceb
[ "MIT" ]
permissive
https://github.com/skdonepudi/100DaysOfCode
749f62eef5826cb2ec2a9ab890fa23e784072703
af4594fb6933e4281d298fa921311ccc07295a7c
refs/heads/master
2023-02-01T08:51:33.074538
2020-12-20T14:02:36
2020-12-20T14:02:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Set numbers

You are given the binary representation of a number. You must consider the
highest number of set bits in the binary representation to complete your task.
For example, 15 is represented as 1111 in binary and it contains four set
bits (1-bits).

You are also given a number N, and your task is to determine the number that
is less than or equal to N and contains the maximum number of set bits in its
binary representation. In other words, print a number M that is less than or
equal to N such that the number of set bits in the binary representation of M
is maximum.

Input format
First line: An integer denoting the number of test cases
For each test case:
First line: An integer N

Output format
For each test case, print the answer on a new line denoting a number that is
less than or equal to N such that the number of set bits in its binary
representation is maximum.

SAMPLE INPUT
1
345

SAMPLE OUTPUT
255

Explanation
The number 255 (< 345) has the most set bits.
'''
for _ in range(int(input())):
    x = bin(int(input()))
    if all(i == '1' for i in x[2:]):
        print(int(x, 2))
    else:
        print(int('1' * len(x[3:]), 2))
UTF-8
Python
false
false
1,168
py
379
SetNumbers.py
282
0.725171
0.708048
0
29
39.310345
236
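The one-liner above relies on a fact worth spelling out: any number below 2**b has at most b set bits, and b of them would require at least 2**b - 1, so for an N that is not all 1-bits the maximum is b-1 bits, achieved by 2**(b-1) - 1 <= N. A brute-force cross-check of that rule for small N:

def best_set_bits(n):
    # Largest set-bit count wins; ties go to the smaller number,
    # matching what the solution above prints.
    return max(range(1, n + 1), key=lambda m: (bin(m).count('1'), -m))

for n in (7, 8, 255, 345):
    print(n, best_set_bits(n))  # 7->7, 8->7, 255->255, 345->255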
atom015/py_boj
6,133,213,336,532
578b957f56cbd4d38e0ac365cc318b313500eac9
592498a0e22897dcc460c165b4c330b94808b714
/2000번/2309_일곱 난쟁이.py
0b837fff035012f572d6763c934cbd76d02b85fa
[]
no_license
https://github.com/atom015/py_boj
abb3850469b39d0004f996e04aa7aa449b71b1d6
42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d
refs/heads/master
2022-12-18T08:14:51.277802
2020-09-24T15:44:52
2020-09-24T15:44:52
179,933,927
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def main(li, s):
    for i in range(9):
        for j in range(9):
            if i != j:
                if 100 == s-(li[i]+li[j]):
                    for k in sorted(li):
                        if k != li[i] and k != li[j]:
                            print(k)
                    return

num_li = []
for i in range(9):
    num_li.append(int(input()))
main(num_li, sum(num_li))
# This is a brute-force problem, similar to "Snow White and the Seven Dwarfs".
UTF-8
Python
false
false
468
py
480
2309_일곱 난쟁이.py
478
0.408213
0.39372
0
14
28.571429
53
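The nested loops above try every pair of the nine candidates and drop the one whose removal leaves a height sum of 100. The same search written with itertools.combinations, using the well-known sample input for BOJ 2309:

from itertools import combinations

def seven_dwarfs(heights):
    total = sum(heights)
    for a, b in combinations(heights, 2):
        if total - a - b == 100:          # removing this pair leaves the seven dwarfs
            rest = list(heights)
            rest.remove(a)
            rest.remove(b)
            return sorted(rest)

print(seven_dwarfs([20, 7, 23, 19, 10, 15, 25, 8, 13]))
# [7, 8, 10, 13, 19, 20, 23]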
bigheadG/gardCharge_rpi
4,183,298,163,513
3aa7423ccef1ffd8ffffbf60b11d24f9b4e4eb8e
73d3ccab5158d4f33d81818eef0b7cd0a5bf3275
/code/packBTSend.py
b492b5da981c5347a93bb0ccd2b8743f6051bd24
[ "MIT" ]
permissive
https://github.com/bigheadG/gardCharge_rpi
15d23af9cd52b45ffedd24cc89ba8843ba7e99fb
45135cf83ac0daa0114d0409bbcfcbd982071ef1
refs/heads/master
2020-04-29T02:15:33.846878
2019-04-02T09:46:07
2019-04-02T09:46:07
175,760,187
8
2
null
null
null
null
null
null
null
null
null
null
null
null
null
#
# Code is under development to control the GC101
#

class Gdefine:
    CMD_DRIVE = 1
    CMD_CUTOFF_TIME = 2
    CMD_TIMER_EN = 3
    CMD_READ_MEM = 4
    CMD_READ_MEM_A = 4
    CMD_FACT_INIT = 5
    CMD_SAMPLE_TIME = 6
    CMD_READ_CONFIG = 7
    CMD_ERASE_QUE = 8
    CMD_RUN_TEST = 9
    CMD_SET_TRIP_AMP = 11
    CMD_SET_OFFTIME = 12
    CMD_READ_CONFIG_2 = 14
    CMD_OFFLINE_ENABLE = 17


class packBTSend:

    flow = 0

    def packTXdata(self, mode, data):
        outBuf = [0xaa for i in range(20)]
        inBuf = [0xaa for i in range(20)]
        inBuf[2] = mode

        # outBuf frame bytes are not encrypted
        outBuf[0] = 40
        outBuf[1] = self.flow
        outBuf[19] = 41

        if mode == 1 or mode == 3 or mode == 4 or mode == 6 or mode == 11 or mode == 17:
            inBuf[3] = (data & 0x000000FF)
        elif mode == 2:  # CMD_CUTOFF_TIME
            inBuf[4] = data & 0x000000FF
            inBuf[5] = (data & 0x0000FF00) >> 8
            inBuf[6] = (data & 0x00FF0000) >> 16
            inBuf[7] = (data & 0xFF000000) >> 24
        elif mode == 12:  # CMD_SET_OFFTIME
            inBuf[3] = data & 0x000000FF
            inBuf[4] = (data & 0x0000FF00) >> 8
            inBuf[5] = (data & 0x00FF0000) >> 16

        #print("inBuf:{}".format(inBuf))

        # encrypt
        i = 2
        for x in inBuf[2:17]:
            d = x ^ ((i ^ inBuf[18]) ^ 0x38)
            outBuf[i] = d
            i += 1

        self.flow += 1
        self.flow = self.flow % 10
        #print("flow = {:d}".format(self.flow))
        return outBuf
UTF-8
Python
false
false
1,311
py
7
packBTSend.py
4
0.584287
0.482075
0
55
22.709091
82
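Because XOR with a fixed keystream is its own inverse, the bytes packed by packTXdata above can be recovered by applying the same transform again; inBuf[18] is always 0xaa inside packTXdata, hence the constant here. A sketch of the inverse, assuming the packBTSend class above is in scope:

def unpack_payload(out_buf):
    # Undo the XOR applied to positions 2..16 of the packet.
    inner = list(out_buf)
    for i in range(2, 17):
        inner[i] = out_buf[i] ^ ((i ^ 0xaa) ^ 0x38)
    return inner

pkt = packBTSend().packTXdata(2, 0x12345678)       # CMD_CUTOFF_TIME payload
print([hex(v) for v in unpack_payload(pkt)[4:8]])  # ['0x78', '0x56', '0x34', '0x12']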
thelebster/zapret-info-parser
6,682,969,114,065
d83e6851ef14c5b867c50e860c0a98f518721b0e
8cb749f1c834495e0b98da16d45a028dd30c5259
/updater/update.py
c573edcc4f255a0c79071a998fb9d24ca2be30ef
[]
no_license
https://github.com/thelebster/zapret-info-parser
187fea5215b2e0a3490318720615bab88c8b01b2
1c053b1f38594a05225f130d234551a282c24d23
refs/heads/master
2021-01-05T14:11:03.547108
2020-03-01T19:23:07
2020-03-01T19:23:07
241,046,135
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
import glob
from pymongo import MongoClient
import ipaddress
from bson.int64 import Int64

MONGODB_URI = os.getenv('MONGODB_URI', 'mongodb://root:root@localhost:27017/blocked?authMechanism=DEFAULT&authSource=admin')
MONGODB_IMPORT_COLLECTION = os.getenv('MONGODB_IMPORT_COLLECTION', 'blocked_new')
MONGODB_PROD_COLLECTION = os.getenv('MONGODB_PROD_COLLECTION', 'blocked')
IMPORT_DIR = os.getenv('IMPORT_DIR', '../data/archive/utf8')


def import_file(filename):
    mongodb_client = MongoClient(MONGODB_URI)
    db = mongodb_client.get_database()
    blocked = db.get_collection(MONGODB_IMPORT_COLLECTION)
    with open(filename, 'r') as csv_file:
        lines = csv_file.readlines()
        inserts = []
        inserted = 0
        for line in lines:
            components = line.strip().split(';')
            if len(components) < 6:
                continue
            ips = components[0].split(' | ')
            domain = components[1]
            url = components[2].strip('"')
            decision_org = components[3]
            decision_num = components[4]
            decision_date = components[5]
            if domain.strip() == '':
                domain = None
            if url.strip() == '' or url == 'http://' or url == 'https://':
                url = None
            for ip in ips:
                if ip.strip() == '':
                    if domain is not None and len(domain.split('.')) == 4:
                        ip = domain
                    else:
                        ip = None
                ip_first = None
                ip_last = None
                length = None
                if ip is not None:
                    pair = ip.split('/')
                    ip_first = ipaddress.ip_address(pair[0])
                    # Skip ipv6.
                    if ip_first.version == 6:
                        continue
                    ip_first = Int64(ip_first)
                    if len(pair) > 1:
                        length = int(pair[1])
                        ip_last = ip_first | (1 << (32 - length)) - 1
                    else:
                        length = 32
                        ip_last = ip_first
                inserts.append({
                    'ip': ip,
                    'ip_first': ip_first,
                    'ip_last': ip_last,
                    'length': length,
                    'decision_date': decision_date,
                    'decision_org': decision_org,
                    'decision_num': decision_num,
                    'domain': domain,
                    'url': url,
                })
                if len(inserts) == 10000:
                    result = blocked.insert_many(inserts)
                    result.inserted_ids
                    inserted += len(inserts)
                    inserts = []
                    pass
        if len(inserts) > 0:
            result = blocked.insert_many(inserts)
            result.inserted_ids
            inserted += len(inserts)
        pass


if __name__ == '__main__':
    files = [f for f in glob.glob(IMPORT_DIR + "/*.csv")]
    for f in files:
        basename = os.path.basename(f)
        print(f'Importing {basename} file...')
        import_file(f)

    # @todo Run health check somewhere...?

    mongodb_client = MongoClient(MONGODB_URI)
    db = mongodb_client.get_database()

    try:
        # Try to drop temporary collection.
        blocked_tmp = db.get_collection(f'~{MONGODB_PROD_COLLECTION}')
        blocked_tmp.drop()
    except Exception as err:
        print(err)

    try:
        # Try to rename current collection.
        blocked = db.get_collection(MONGODB_PROD_COLLECTION)
        blocked.rename(f'~{MONGODB_PROD_COLLECTION}')
    except Exception as err:
        print(err)

    blocked_new = db.get_collection(MONGODB_IMPORT_COLLECTION)
    blocked_new.create_index('domain')
    blocked_new.create_index('ip')
    blocked_new.create_index('url')
    blocked_new.rename(MONGODB_PROD_COLLECTION)
UTF-8
Python
false
false
3,993
py
7
update.py
4
0.498873
0.489356
0
114
34.026316
124
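The expression ip_first | (1 << (32 - length)) - 1 in import_file above computes the last address of a CIDR block (note that | binds more loosely than -, so the host mask is built first and OR-ed in whole). The stdlib can cross-check it:

import ipaddress

net = ipaddress.ip_network('10.0.0.0/24')
ip_first = int(net.network_address)
ip_last = ip_first | (1 << (32 - net.prefixlen)) - 1  # same expression as in import_file
assert ip_last == int(net.broadcast_address)
print(ip_first, ip_last)  # 167772160 167772415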
amyc28/Premiere-Pro-Silence-Cutter
13,786,845,029,431
9fe608314030874affd5d54da4c8916be138f402
073412f89865e4f726bf14a115630c52a17658e6
/MouseTracker.py
94b73dd02f49c68658e2a0193faa58b9b2e811ca
[]
no_license
https://github.com/amyc28/Premiere-Pro-Silence-Cutter
bbe7347b0dc7e74f0bd5a5b56f2d0edd8b2885df
9787c3648790b2466c11cff1708a78190b0e6794
refs/heads/master
2021-02-19T07:38:55.288611
2020-02-28T03:20:47
2020-02-28T03:20:47
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
from PIL import ImageGrab
import cv2
from directKeys import click, queryMousePosition, PressKey, ReleaseKey, SPACE
import time
import math
from pynput.keyboard import Key, Controller as KeyboardController
from pynput.mouse import Controller, Button as MouseController
import pyautogui

while True:
    mouse_pos = queryMousePosition()  # sets mouse_pos to the current position of the mouse
    print(mouse_pos.x, mouse_pos.y)
    time.sleep(.01)
UTF-8
Python
false
false
484
py
3
MouseTracker.py
2
0.762397
0.756198
0
14
32.142857
89
S-boker/Personal-Projects-
10,015,863,780,553
7c7874bfd98986a3de47f095df60da0399e5b71a
281e4720eab3c1ae339bd0f6c5554e9c35a1fd79
/Sudoku_Solver.py
f7959557e3ec71e2d2e5e3d7ec18691434e15818
[]
no_license
https://github.com/S-boker/Personal-Projects-
869a19282ae357bc884b188cfa1e8ba819596b6c
b03772707981e006b95e90be6b851d6ccf12373d
refs/heads/master
2021-01-08T03:48:21.486816
2020-02-20T14:45:26
2020-02-20T14:45:26
241,903,557
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Shohum Boker
# 2/19/20

# initializing lists
box0 = [0] * 9
box1 = [0] * 9
box2 = [0] * 9
box3 = [0] * 9
box4 = [0] * 9
box5 = [0] * 9
box6 = [0] * 9
box7 = [0] * 9
box8 = [0] * 9
boxes = [box0, box1, box2, box3, box4, box5, box6, box7, box8]


# Okay function
def okay(b, sq):
    # box test
    bs = [int(s) for s in b]
    for s in b:
        if bs.count(s) > 1 and s != 0:
            return False
    # Initializing helper lists
    rt = []
    ct = []
    for box in boxes:
        tbi = boxes.index(box)
        bi = boxes.index(b)
        if b is box:
            continue
        if tbi % 3 == bi % 3:
            ct.append(box)  # Column adjacent Boxes
        if tbi // 3 == bi // 3:
            rt.append(box)  # Row adjacent Boxes
    sqi = b.index(sq)
    ri = [i for i in range(9) if i // 3 == sqi // 3]  # Row helper list
    ci = [i for i in range(9) if i % 3 == sqi % 3]    # Column helper list
    # Using helper lists for column and row test
    for x in range(6):
        y = x // 3
        cb = ct[y]
        rb = rt[y]
        c = ci[x % 3]
        r = ri[x % 3]
        if int(rb[r]) == sq or int(cb[c]) == sq:
            return False
    return True


# Finding the last integer if the entry goes out of bounds
def lastInt(b, ind):
    ind -= 1
    while isinstance(b[ind], str) or ind < 0:
        if ind < 0:
            b = boxes[boxes.index(b) - 1]
            ind = 8
        if isinstance(b[ind], str):
            ind -= 1
    return [b, ind]


def printr():
    # Checking if the board was Solvable
    for box in boxes:
        for s in box:
            if s == 0:
                print("----------------------------------")
                print("Unsolvable")
                quit()
    # Helper list
    row = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    s = "----------------------"
    for x in range(81):
        # Every row has nine entries
        if x % 9 == 0:
            s += "\n"
        # Complex formula taking advantage of the helper list to convert boxes into rows
        s += str(boxes[row[x // 27][(x // 3) % 3]][row[(x // 9) % 3][x % 3]])
    print(s)


# Instructions for the user to operate the UI system (hopefully in the future the UI will be better)
print("Welcome to Sudoku Solver:")
print("Enter the rows in your Sudoku Board from the top to the bottom and all blank entries should be '0'")
print("For example: 104005007 is a valid row")
row = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
x = 0
boo = False
while x < 9:
    n = input("row" + str(x + 1) + ": ")
    # Check for proper length
    while len(n) != 9:
        print("Error: Row is not of size 9")
        n = input("row" + str(x + 1) + ": ")
    for y in range(9):
        if boo:
            boo = False
            break
        try:
            # Check for proper data type and have all values as ints
            bs = [int(s) for s in n]
        except ValueError:
            print("Error: Invalid input, characters are not acceptable. ")
            # Allows the user to redo the row
            x -= 1
            break
        else:
            # Checks that the row is valid
            for s in bs:
                if bs.count(s) > 1 and s != 0:
                    print("Error: Repeating value found: " + str(s))
                    x -= 1
                    boo = True
                    break
            # Converts all 0's to ints
            if n[y] == "0":
                r = int(n[y])
            else:
                r = n[y]
            # Complex formula to put the rows in the form of boxes
            boxes[row[x // 3][y // 3]][row[x % 3][y % 3]] = r
    x += 1

# Initializing index of "boxes"
q = -1
while True:
    # Moving to the next box
    q += 1
    # If all boxes are filled, print
    if q == 9:
        printr()
        break
    else:
        box = boxes[q]
        # Initializing index of the current box
        p = -1
        while True:
            # Moving to the next square in the box
            p += 1
            # If all squares are filled up then move on to the next box
            if p == 9:
                break
            else:
                while True:
                    # Checking if the value was entered by the user
                    if isinstance(box[p], str):
                        break
                    else:
                        # Add one to the current value in the square
                        box[p] += 1
                        # If the value is too big: backtrack
                        if box[p] == 10:
                            box[p] = 0
                            cord = lastInt(box, p)
                            box = cord[0]
                            p = cord[1]
                            q = boxes.index(box)
                        # Check if the value in the square is valid in rules of Sudoku at its current index
                        elif okay(box, box[p]):
                            break
UTF-8
Python
false
false
4,982
py
1
Sudoku_Solver.py
1
0.44159
0.414894
0
156
29.935897
107
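printr() above converts the box-major storage to the printed row-major grid with a double index formula. A quick check that cell 40 (row 4, column 4) really maps to the middle square of the middle box:

row = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
x = 40                                  # row-major cell index, 0..80
box_index = row[x // 27][(x // 3) % 3]  # which of the nine boxes
square_index = row[(x // 9) % 3][x % 3] # which square inside that box
print(box_index, square_index)          # 4 4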
LMZimmer/Auto-PyTorch_refactor
12,429,635,357,991
ee60ab19dfca60cba566b1683320e744d508d9d0
06f8f1b812e6651222bdb8299b840c09f919d89c
/autoPyTorch/search_space/search_space.py
5587eff15d5947ec47913b42ef7622960b71bd20
[ "Apache-2.0", "LicenseRef-scancode-philippe-de-muyter", "LicenseRef-scancode-unknown-license-reference" ]
permissive
https://github.com/LMZimmer/Auto-PyTorch_refactor
4cda7658319db4faf894895f046fac49ebd07757
ac7a9ce35e87a428caca2ac108b362a54d3b8f3a
refs/heads/master
2023-02-19T02:02:57.256438
2020-12-08T14:18:27
2020-12-08T14:18:27
281,992,226
0
1
Apache-2.0
false
2021-01-22T14:53:10
2020-07-23T15:42:51
2020-12-09T14:27:36
2021-01-22T14:53:09
877
0
2
12
Python
false
false
import typing
from typing import Optional

import ConfigSpace as cs


class SearchSpace:

    hyperparameter_types = {
        'categorical': cs.CategoricalHyperparameter,
        'integer': cs.UniformIntegerHyperparameter,
        'float': cs.UniformFloatHyperparameter,
        'constant': cs.Constant,
    }

    @typing.no_type_check
    def __init__(
        self,
        cs_name: str = 'Default Hyperparameter Config',
        seed: int = 11,
    ):
        """Create the underlying hyperparameter configuration space.

        Args:
            cs_name (str): The name of the configuration space.
            seed (int): Seed value used for the configuration space.

        Returns:
        """
        self._hp_search_space = cs.ConfigurationSpace(
            name=cs_name,
            seed=seed,
        )

    @typing.no_type_check
    def add_hyperparameter(
        self,
        name: str,
        hyperparameter_type: str,
        **kwargs,
    ):
        """Add a new hyperparameter to the configuration space.

        Args:
            name (str): The name of the hyperparameter to be added.
            hyperparameter_type (str): The type of the hyperparameter to be added.

        Returns:
            hyperparameter (cs.Hyperparameter): The hyperparameter that was
                added to the hyperparameter search space.
        """
        missing_arg = SearchSpace._assert_necessary_arguments_given(
            hyperparameter_type,
            **kwargs,
        )
        if missing_arg is not None:
            raise TypeError(f'A {hyperparameter_type} must have a value for {missing_arg}')
        else:
            hyperparameter = SearchSpace.hyperparameter_types[hyperparameter_type](
                name=name,
                **kwargs,
            )
            self._hp_search_space.add_hyperparameter(
                hyperparameter
            )

        return hyperparameter

    @staticmethod
    @typing.no_type_check
    def _assert_necessary_arguments_given(
        hyperparameter_type: str,
        **kwargs,
    ) -> Optional[str]:
        """Assert that given a particular hyperparameter type, all the
        necessary arguments are given to create the hyperparameter.

        Args:
            hyperparameter_type (str): The type of the hyperparameter to be added.

        Returns:
            missing_argument (str|None): The argument that is missing
                to create the given hyperparameter.
        """
        necessary_args = {
            'categorical': {'choices', 'default_value'},
            'integer': {'lower', 'upper', 'default', 'log'},
            'float': {'lower', 'upper', 'default', 'log'},
            'constant': {'value'},
        }

        hp_necessary_args = necessary_args[hyperparameter_type]
        for hp_necessary_arg in hp_necessary_args:
            if hp_necessary_arg not in kwargs:
                return hp_necessary_arg

        return None

    @typing.no_type_check
    def set_parent_hyperperparameter(
        self,
        child_hp,
        parent_hp,
        parent_value,
    ):
        """Activate the child hyperparameter on the search space only if the
        parent hyperparameter takes a particular value.

        Args:
            child_hp (cs.Hyperparameter): The child hyperparameter to be added.
            parent_hp (cs.Hyperparameter): The parent hyperparameter to be considered.
            parent_value (str|float|int): The value of the parent hyperparameter
                for when the child hyperparameter will be added to the search space.

        Returns:
        """
        self._hp_search_space.add_condition(
            cs.EqualsCondition(
                child_hp,
                parent_hp,
                parent_value,
            )
        )

    @typing.no_type_check
    def add_configspace_condition(
        self,
        child_hp,
        parent_hp,
        configspace_condition,
        value,
    ):
        """Add a condition on the child hyperparameter.

        Args:
            child_hp (cs.Hyperparameter): The child hyperparameter to be added.
            parent_hp (cs.Hyperparameter): The parent hyperparameter to be considered.
            configspace_condition (cs.AbstractCondition): The condition to be
                fulfilled by the parent hyperparameter. A list of all the possible
                conditions can be found at ConfigSpace/conditions.py.
            value (str|float|int|list): The value of the parent hyperparameter to
                be matched in the condition. value needs to be a list only for
                the InCondition.

        Returns:
        """
        self._hp_search_space.add_condition(
            configspace_condition(
                child_hp,
                parent_hp,
                value,
            )
        )
UTF-8
Python
false
false
4,808
py
67
search_space.py
62
0.571339
0.570923
0
153
30.424837
93
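A minimal usage sketch for the SearchSpace wrapper above; the module path is assumed from the record's filename, and all parameter values are illustrative rather than taken from the source repository.

# Sketch: build a small conditional search space with the class above.
from search_space import SearchSpace  # assumed import path

space = SearchSpace(cs_name='toy-space', seed=3)

optimizer = space.add_hyperparameter(
    name='optimizer',
    hyperparameter_type='categorical',
    choices=['adam', 'sgd'],
    default_value='adam',
)
momentum = space.add_hyperparameter(
    name='momentum',
    hyperparameter_type='float',
    lower=0.0,
    upper=0.99,
    default_value=0.9,
    log=False,
)

# Only sample momentum when SGD is selected.
space.set_parent_hyperparameter(momentum, optimizer, 'sgd')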
Heisenberg2017/LogColorizer
171,798,707,333
656b568229bc010d68a4a0352c55c979d88abbd5
8b968f85f54966924626e7bb89ef73f71f466ba0
/monitor/api.py
975711603548e41f655c13002b103fafed0dc7e4
[ "MIT" ]
permissive
https://github.com/Heisenberg2017/LogColorizer
31eb0d7e565a574adddfe9ae7d09451c2a49ce9e
61ac64d1e4e8b1cc4d0e3104d25ff20d7ce39262
refs/heads/master
2020-07-23T04:46:00.549544
2019-12-09T08:56:27
2019-12-09T08:56:27
207,447,585
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from monitor.watcher import Watcher
import time

try:
    import configparser
except ImportError:
    import ConfigParser as configparser


def multi_watch(args):
    gens = []
    config = configparser.ConfigParser()
    config.read('conf/monitor.conf')
    for name in config.sections():
        print('-----[%s]-----' % name)
        watch_dict = {}
        # config.items(name) works on both the Python 2 and Python 3
        # parsers, unlike the mapping-style config[name] access.
        for key, value in config.items(name):
            print("%s: %s" % (key, value))
            watch_dict[key] = value
        excludes = watch_dict['excludes'].split('|') if watch_dict.get('excludes') else None
        gen = Watcher(watch_dict['path'], excludes=excludes, project=name).auto_reload(watch_dict['action'])
        gens.append(gen)

    while True:
        for g in gens:
            next(g)
        time.sleep(1)
UTF-8
Python
false
false
787
py
13
api.py
13
0.598475
0.597205
0
25
30.48
108
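multi_watch() reads conf/monitor.conf; a plausible INI sketch follows. Only the path/action/excludes keys are implied by the code itself, so the section name, the path, and the action command here are assumptions.

; Hypothetical conf/monitor.conf
[webapp]
path = /srv/webapp
action = systemctl restart webapp
excludes = *.log|*.pyc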
AlexLSmith/mqtt-app
3,341,484,590,809
7fe1c3d54a4ef1692a755e4c09885b2029961358
ac219f70f734dbe108cee4612af5c046dfdd5f05
/mqtt_app/config.py
f204390628cfea120c75b6a73e5f4e8265a8f6fe
[]
no_license
https://github.com/AlexLSmith/mqtt-app
8cb6a302be6ada8751895867696b6ca64e30ace8
31c4f367caadc0c20db41d7e2448b6e98bdc9f15
refs/heads/master
2020-06-21T13:52:30.987317
2019-07-18T07:20:17
2019-07-18T07:20:17
197,472,528
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
HOST = "localhost"
PORT = 1883

SENSOR_TOPIC = "sensor/upload"
AGGREGATE_TOPIC = "sensor/aggregate"
AVG_TIME_PERIODS = (1, 5, 30)
UTF-8
Python
false
false
130
py
7
config.py
4
0.7
0.638462
0
6
20.666667
36
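A minimal paho-mqtt subscriber sketch wired to the constants above; it is an illustration of how the config might be consumed, not code from the mqtt-app repository, and the import path is assumed from the file layout.

import paho.mqtt.client as mqtt

from mqtt_app import config  # assumed import path


def on_message(client, userdata, message):
    # Print every sensor upload as it arrives.
    print(message.topic, message.payload.decode("utf-8"))


client = mqtt.Client()
client.on_message = on_message
client.connect(config.HOST, config.PORT)
client.subscribe(config.SENSOR_TOPIC)
client.loop_forever()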
yuanhaoz/Python-Scrapy
15,556,371,573,625
1077688a82302b5b94a732cdf593fef3de445273
686be27d73e4abbe45d51bfa64c80bcadc89ceb9
/test/test2.py
35457996be6ae6f4b1b4cbde13f93d8132f8a373
[]
no_license
https://github.com/yuanhaoz/Python-Scrapy
30cbfc7dd9096d02da740e0d3207ba3ae017d964
5c9d7b2e83c3fdf88c77792f9ad189b8a2d35905
refs/heads/master
2020-12-01T05:41:58.159997
2016-08-27T07:53:45
2016-08-27T08:09:07
66,702,527
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
#
########################################################################

import md5
import time
import urllib
import re

import scrapy
from scrapy.http import Request
from bs4 import BeautifulSoup
from scrapy import signals

html_doc = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="zh-HK" lang="zh-hk">
<head>
<meta name="Keywords" content="保安局禁毒處, 禁毒影音短片">
<meta name="description" content="保安局禁毒處 - 禁毒影音短片">
<meta name="description-2" content="Security Bureau, Narcotics Division Website">
<meta name="author" content="Security Bureau, Narcotics Division">
<script language="JavaScript" src="/js/jquery-1.11.0.min.js" type="text/javascript"></script>
<link href="/css/print.css" rel="stylesheet" type="text/css" media="print">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<script language="JavaScript" src="../js/genLayer.js" type="text/javascript"></script>
<script language="JavaScript" src="../js/common.js" type="text/javascript"></script>
<script language="JavaScript" src="../js/data.js" type="text/javascript"></script>
<script language="JavaScript" src="../js/swf.js" type="text/javascript"></script>
<script language="JavaScript" src="../js/jquery-1.11.0.min.js" type="text/javascript"></script>
<script language="JavaScript" src="../js/reCon.js" type="text/javascript"></script>
<link href="/css/format.css" rel="stylesheet" type="text/css" >
<title>保安局禁毒處 - 禁毒影音短片</title>
<script language="JavaScript" type="text/javascript">
<!--
var currentSection='7,3';
function MM_swapImgRestore() { //v3.0
  var i,x,a=document.MM_sr; for(i=0;a&&i<a.length&&(x=a[i])&&x.oSrc;i++) x.src=x.oSrc;
}
function MM_preloadImages() { //v3.0
  var d=document; if(d.images){ if(!d.MM_p) d.MM_p=new Array();
    var i,j=d.MM_p.length,a=MM_preloadImages.arguments; for(i=0; i<a.length; i++)
    if (a[i].indexOf("#")!=0){ d.MM_p[j]=new Image; d.MM_p[j++].src=a[i];}}
}
function MM_findObj(n, d) { //v4.01
  var p,i,x; if(!d) d=document; if((p=n.indexOf("?"))>0&&parent.frames.length) {
    d=parent.frames[n.substring(p+1)].document; n=n.substring(0,p);}
  if(!(x=d[n])&&d.all) x=d.all[n]; for (i=0;!x&&i<d.forms.length;i++) x=d.forms[i][n];
  for(i=0;!x&&d.layers&&i<d.layers.length;i++) x=MM_findObj(n,d.layers[i].document);
  if(!x && d.getElementById) x=d.getElementById(n); return x;
}
function MM_swapImage() { //v3.0
  var i,j=0,x,a=MM_swapImage.arguments; document.MM_sr=new Array; for(i=0;i<(a.length-2);i+=3)
   if ((x=MM_findObj(a[i]))!=null){document.MM_sr[j++]=x; if(!x.oSrc) x.oSrc=x.src; x.src=a[i+2];}
}
function MM_openBrWindow(theURL,winName,features) { //v2.0
  window.open(theURL,winName,features);
}
function openWin(theURL,winName,features,opener) { //v2.0
  popUp=window.open(theURL,winName,features);
  popUp.opener=opener;
}
//-->
</script>
<style type="text/css">
<!--
.search { span-family: "Arial", "Helvetica", "sans-serif"; span-size: 12px; text-decoration: none}
.sidebar a:visited { span-family: "Arial", "Helvetica", "sans-serif"; span-size: 12px; text-decoration: none ; color: #000099}
.footer { span-size: 10pt; color: #000000; span-family: "Arial", "Helvetica", "sans-serif"}
.header { span-size: 10pt; color: #3333FF ; span-family: "Arial", "Helvetica", "sans-serif"}
-->
.reCon3 .batch { clear: both; padding-top: 5px !important; }
</style>
</head>
<body><h1
style="display:none">Title</h1> <table border="0" cellpadding="0" cellspacing="0" width="760"> <tr valign="top"> <td> <script language="JavaScript" type="text/javascript">targetSwitchPage = ""</script> <script language="JavaScript" src="/js/header.js" type="text/javascript"></script> </td> </tr> </table> <table id="content" width="760" border="0" align="left" cellpadding="0" cellspacing="0"> <tr> <td align="left" valign="top" width="153" height="22" > <script language="JavaScript" type="text/javascript">getLeftMenu();</script> </td> <td align="left" valign="top" bgcolor="#FFFFFF"> <table width="597" border="0" cellpadding="5" cellspacing="5" > <!-- <tr> <td> <script language="JavaScript" type="text/javascript">generateTopMenu();</script> </td> </tr> --> <tr> <td> <table border="1"> <Tr valign="middle"> <td width="25%"><a href="druginfo.htm" title="This link will open in new window">毒品資料</a></td> <td width="25%"><a href="publications.htm" title="This link will open in new window">禁毒刊物</a></td> <td width="25%"><a href="videos_radio_clips.htm" title="This link will open in new window">禁毒影音短片</a></td> <td width="25%"><a href="resources_parents.htm" title="This link will open in new window">給家長的禁毒資源</a></td> </tr> <Tr valign="middle"> <td width="25%"><a href="resources_teachers.htm" title="This link will open in new window">給教師和社工的禁毒資源</a></td> <td width="25%"><a href="resources_professionals.htm" title="This link will open in new window">給醫護人員的禁毒資源</a></td> <td width="25%"><a href="resources_youths.htm" title="This link will open in new window">給青年人的禁毒資源</a></td> <td width="25%"><a href="druginfocentre.htm" title="This link will open in new window">香港賽馬會藥物資訊天地</a></td> </tr> </table> </td> </tr> <tr> <td valign="top" align="left"><img src="images/top_buttons/top_buttons_dot_line.gif" alt=""> </td> </tr> <tr> <td valign="top" align="left"><a name="top"></a></td> </tr> <tr> <a name="main-content" id="main-content" tabindex="0"></a> <td valign="top"><span ><strong>禁毒影音短片</strong></span></td> </tr> <tr> <td valign="top"> <table border="0" cellspacing="2" cellpadding="8" > <Tr valign="top"> <td><strong>禁毒影像短片:</strong></td> </tr> <tr> <td> <div style="width:100%;" class="reCon3"> <div class="iso"> <a href="http://www.hkayd.org.hk/YourChoice/finalentries.pdf" target="_blank"><img src="/tc/images/finalentries.jpg" alt="" border="0" > <br>禁 毒 基 金 贊 助 「 Your Choice 」 納 米 電 影 創 作 比 賽</a> </div> <div class="iso"> <a href="tv_announcements.htm"><img src="/tc/images/banner_03.gif" alt="" border="0" > <br>電 視 宣 傳 短 片</a> </div> <div class="iso"> <a href="sunproject.htm"><img src="/tc/images/sunlife_icon.gif" alt="" border="0" > <br>禁 毒 基 金 贊 助 「 路 訊 通 」 節 目 《Sun 生 命》</a> </div> <!--div class="iso"> <div class="isopic"><a href="http://www.metroradio.com.hk/Campaign/997/TeensNoDrugs/" target="_blank" title="此連結會於新視窗開啟"><img src="../en/images/antidrug_event_2012.gif" border="0" width="137" height="103"></a></div> <div class="isotext"><a href="http://www.metroradio.com.hk/Campaign/997/TeensNoDrugs/" target="_blank" title="此連結會於新視窗開啟">打 開 TEEN 窗 愛 @ 無 毒 SHOW</a></div> </div--> <!--div class="iso"> <div class="isopic"><a target="_blank" title="此連結會於新視窗開啟" href="http://www.metroradio.com.hk/Campaign997/KnockDrugsOutWithLove/ "><img src="../en/images/teenteenshow.gif" border="0" width="137" height="103"></a></div> <div class="isotext"><a target="_blank" title="此連結會於新視窗開啟" href="http://www.metroradio.com.hk/Campaign997/KnockDrugsOutWithLove/ ">TEEN TEEN 有 愛 無 毒 Show</a></div> </div--> <!--div class="iso"> <div 
class="isopic"><a href="http://programme.rthk.hk/rthk/tv/programme.php?name=tv/drugbattleforum&p=5923" target="_blank" title="此連結會於新視窗開啟"><img src="../en/images/Drug Battle Forum.png" border="0" width="137" height="103"></a></div> <div class="isotext"><a href="http://programme.rthk.hk/rthk/tv/programme.php?name=tv/drugbattleforum&p=5923" target="_blank" title="此連結會於新視窗開啟">香 港 電 台 電 視 節 目 《 毒 海 論 浮 生 》 </a></div> </div--> <!--div class="iso"> <div class="isopic"><a href="http://programme.rthk.hk/rthk/tv/programme.php?name=tv/drugbattle2013&p=5689" target="_blank" title="此連結會於新視窗開啟"><img src="../en/images/rthk_progam.gif" border="0" width="137" height="103"></a></div> <div class="isotext"><a href="http://programme.rthk.hk/rthk/tv/programme.php?name=tv/drugbattle2013&p=5689" target="_blank" title="此連結會於新視窗開啟">香 港 電 台 電 視 節 目 《 毒 海 浮 生 》</a></div> </div--> <!--div class="iso"> <div class="isopic"><a href="http://programme.tvb.com/drama/beautyofthegame" target="_blank" title="此連結會於新視窗開啟"><img src="../en/images/icon_beautyofthegame.gif" border="0" width="137" height="103"></a></div> <div class="isotext"><a href="http://programme.tvb.com/drama/beautyofthegame" target="_blank" title="此連結會於新視窗開啟">禁毒電視連續劇《美麗高解像》</a></div> </div--> <div class="iso"> <a href="antidrug_themesong_2.htm"><img src="/tc/images/icon_antidrug_song_2.gif" border="0" width="137" height="103" alt="" b> <br>「不可一.不可再」<br>全港青少年禁毒運動2009 <br>主題曲「天造之材」MTV</a> </div> <div class="iso"> <a href="antidrug_themesong.htm"><img src="/tc/images/icon_antidrug_song.gif" border="0" width="137" height="103" alt="" b> <br>「不可一.不可再」禁毒運動主題曲「不不不」MTV</a> </div> </div> </td> </tr> <tr> <td>&nbsp;</td> </tr> </table> </td> </tr> <tr> <td valign="top"> <table border="0" cellspacing="2" cellpadding="8" > <Tr valign="top"> <td colspan="3"><strong>禁毒聲音短片:</strong></td> </tr> <tr> <td> <div style="width:100%;" class="reCon3"> <div class="iso"> <a href="radio_announcements.htm"><img src="/tc/images/icon_antidrug_radio.gif" border="0" width="137" height="103" alt="" > <br>電台宣傳聲帶</a> </div> <div class="iso"> <a href="rs_handstogether_2015.htm"><img src="/tc/images/sqsqkd2015.jpg" border="0" alt="" > <br>禁毒電台環節「手牽手 齊抗毒」</a> </div> <div class="iso"> <a href="adEduSeg.htm"><img src="/tc/images/jjdp_qxkd_s.jpg" border="0" alt="" > <br>禁毒電台環節「堅拒毒品 齊心抗毒」</a> </div> <div class="iso"> <a href="http://www.metroradio.com.hk/Campaign/2013/997/Narcotics/" target="_blank" title="此連結會於新視窗開啟"><img src="/tc/images/jbtc.gif" alt="" b width="137" height="103" border="0"> <br>禁毒廣播劇「戒不太遲」</a> </div> <div class="iso"> <a href="antidrug_themesong_2.htm"><img src="/tc/images/icon_antidrug_song_2.gif" border="0" width="137" height="103" alt="" b> <br>「不可一.不可再」全港青少年禁毒運動2009 主題曲「天造之材」</a> </div> <div class="iso"> <a href="antidrug_themesong.htm"><img src="/tc/images/icon_antidrug_song.gif" border="0" width="137" height="103" alt="" b> <br>「不可一.不可再」禁毒運動主題曲「不不不」</a> </div> </div> </td> </tr> <tr> <td>&nbsp;</td> <td>&nbsp;</td> </tr> </table> </td> </tr> <tr> <td> <div align="center"><img src="images/chi/botdot.jpg" alt="" width="602" height="3" style="width:603px;" ></div> <table align="center" border="0" cellpadding="0" cellspacing="0" width="98%"> <tbody><tr> <td><script language="JavaScript" src="../js/footer.js" type="text/javascript"></script><script language="JavaScript" type="text/javascript"> footer(); </script></td> <td> <div align="right"><span class="footer"><script type="text/javascript">var manual_date ="";lastrevision();</script></span></div> </td> </tr> <!--<script 
language="JavaScript" type="text/javascript"> footer_wcag(); </script>-->
</tbody></table>
</td>
</tr>
</table>
</td>
</tr>
</table>
<p>&nbsp;</p>
<!-- Use genLayer.js to create the following code -->
<!--
<div id="Layer2" style="position:absolute; left:4px; top:633px; width:146px; height:61px; z-index:2">
<div align="center"><a href="javascript:MM_openBrWindow('NDgame_c/c_game.htm','','width=640,height=480');"><img src="images/chi_beautiful.gif" alt="角色扮演禁毒遊戲 -- 美麗人生" border="0"></a></div>
</div>
<div id="Layer1" style="position:absolute; left:2px; top:550px; width:150px; height:74px; z-index:1">
<div align="center"><a href="javascript:MM_openBrWindow('c_flash.htm','','width=760,height=420');"><img src="images/chi_drug.gif" alt="啪丸--有何結局? 啪丸=玩完" border="0"></a></div>
</div>
-->
<script type="text/javascript">
<!--
genfooterLayer();
//-->
</script>
</body>
</html>
"""

soup = BeautifulSoup(html_doc, "lxml-xml")

# print '---------------------------------'
# all_p = soup.find_all('table')[1].find_all('td')[1].find_all('tr')[1].get_text().strip()
# all_p = all_p.replace('0 cellpadding=0 cellspacing=0>', '')
# all_p = all_p.replace('footer();', '')
# all_p = all_p.replace('''left valign=top width="50">''', '')
# all_p = all_p.replace('left valign=top>', '')
# all_p = all_p.replace('''border="0">''', '')
# all_p = all_p.replace('250>', '')
# all_p = all_p.replace('\n', '')
# all_p = all_p.replace('\t', '')
# print all_p

# print '---------------------------------'
# all_p = soup.find_all('table')[1].find_all('td')[1].find_all('p')
# titles = soup.find_all('h1')
# title = ''
# content = ''
# for i in range(1, len(titles)):
#     title += titles[i].get_text().strip()
# for i in range(0, len(all_p)-1):
#     content += all_p[i].get_text().strip()
# content = content.replace('\n', '')
# content = content.replace('\t', '')
# content = title + '\n' + content
# print content

print '---------------------------------'
# all_p = soup.find_all('table')[1].find_all('td')[1].find_all('p')
all_p = soup.find_all('table')[1].find_all('td')[1]
titles = soup.find_all('h1')
title = ''
content = ''
for i in range(1, len(titles)):
    title += titles[i].get_text().strip()
if all_p:
    ex1 = all_p.find_all(['p','h2','h3','h4','strong','a'])
    for i in range(0, len(ex1)-1):
        content += ex1[i].get_text().strip()
content = re.sub(r'\w+=\"(\w+)?\"', "", content)
content = re.sub(r'\w+=\w+', "", content)
content = re.sub(r'\w+=\"\w+\:#\d+\;\w+\-\w+:\w+;\w+\-\w+:\w+\">', "", content)
content = re.sub(r':#\d+;\w+-\w+:\w+;\w+-\w+:\w+', "", content)
content = re.sub(r'>\"', "", content)
content = re.sub(r'>', "", content)
content = re.sub(r'\"', "", content)
content = re.sub(r'top', "", content)
content = content.replace('\n', '')
content = content.replace('\t', '')
content = title + '\n' + content
print content
print '---------------------------------'
UTF-8
Python
false
false
15,897
py
38
test2.py
37
0.579865
0.554997
0
348
41.867816
244
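Much of the regex clean-up at the end of test2.py can usually be replaced by BeautifulSoup's own text extraction; a hedged sketch follows, with the HTML parser ("lxml" instead of the XML parser used above) and the variable names being assumptions.

# Sketch: let get_text() strip the markup instead of chained re.sub() calls.
from bs4 import BeautifulSoup

soup2 = BeautifulSoup(html_doc, "lxml")
cell = soup2.find_all('table')[1].find_all('td')[1]
text = cell.get_text(separator=' ', strip=True)
print text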
dangpzanco/disciplina-fala
240,518,198,735
62d8a7408590e1acf65e1a4390595dec684cee5f
0a84d748400b9c7fc9121b4a76a2bf8d1d75de14
/experiment/experiment.py
783fb17d4ab7989bbd10a178b7e20a1beaf43cec
[]
no_license
https://github.com/dangpzanco/disciplina-fala
3649a103777370fb7a8b15fce37e97446074c9f4
04b4213084969304d44efa3adb6198af101cfd83
refs/heads/master
2020-07-30T21:35:56.252159
2019-12-04T14:19:40
2019-12-04T14:19:40
210,365,686
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import pathlib
import soundfile as sf
import librosa
import librosa.display
import numpy as np
import numpy.random as rnd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import trange
import shutil

rnd.seed(0)

metadata = pd.read_csv('../analysis/speechmetrics_results.csv')
metadata = metadata.drop(['pesq', 'stoi', 'srmr'], axis=1)
print(metadata.columns)
print(metadata)

technique_list = ['noisy', 'wiener', 'bayes', 'binary']
num_techniques = len(technique_list)

# exit()

snr_values = np.array([-20, -10, 0, 10, 20])
num_snr = snr_values.size  # 5
files_per_snr = 4
num_rep = 1

exp_folder = pathlib.Path('exp_data')
exp_folder.mkdir(parents=True, exist_ok=True)

speech_folder = pathlib.Path('../data/speech/')
noisy_folder = pathlib.Path('../data/speech+noise/')

# id_list = []
# filename = []
# speech_name = []
# noise_name = []
# realization = []
# SNR = []
# technique = []
# rep_list = []

# Metadata definitions
num_files = num_snr * (files_per_snr + num_rep) * num_techniques
exp_metadata = pd.DataFrame(
    index=np.arange(num_files),
    columns=['id', 'filename', 'speech_name', 'noise_name',
             'realization', 'SNR', 'technique', 'rep'])

file_list = []
exp_index = 0
for k, tech in enumerate(technique_list):
    for i in range(num_snr):
        ind = metadata['SNR'] == snr_values[i]
        rnd_ind = rnd.permutation(ind.sum())[:files_per_snr]
        filenames = metadata['filename'].values[ind][rnd_ind]
        for j, item in enumerate(filenames):
            exp_metadata['filename'][exp_index] = item
            exp_metadata['speech_name'][exp_index] = item.split('_')[0]
            exp_metadata['noise_name'][exp_index] = item.split('_')[1][:-1]
            exp_metadata['realization'][exp_index] = item.split('_')[1][-1]
            exp_metadata['SNR'][exp_index] = float(item.split('_')[-1])
            exp_metadata['technique'][exp_index] = tech
            exp_metadata['rep'][exp_index] = False
            exp_index += 1
            file_list.append(item)
        exp_metadata['rep'][exp_index-1] = True

        # Repeated audios
        for j in range(num_rep):
            exp_metadata['filename'][exp_index] = item
            exp_metadata['speech_name'][exp_index] = item.split('_')[0]
            exp_metadata['noise_name'][exp_index] = item.split('_')[1][:-1]
            exp_metadata['realization'][exp_index] = item.split('_')[1][-1]
            exp_metadata['SNR'][exp_index] = float(item.split('_')[-1])
            exp_metadata['technique'][exp_index] = tech
            exp_metadata['rep'][exp_index] = True
            exp_index += 1
            file_list.append(item)

num_files = len(file_list)
print(num_files, file_list)

exp_metadata = exp_metadata.sample(frac=1).reset_index(drop=True)
exp_metadata['id'] = np.arange(num_files)
print(exp_metadata)
exp_metadata.to_csv('exp_metadata.csv', index=False)

subject_metadata = exp_metadata.drop([
    'filename', 'speech_name', 'noise_name',
    'realization', 'SNR', 'technique', 'rep'], axis=1)
subject_metadata['quality'] = -1
print(subject_metadata)

# exit()

for i in range(num_files):
    tech = exp_metadata['technique'][i]
    # 'is' compares identity, not string equality; '==' is the correct test
    if tech == 'noisy':
        processed_folder = pathlib.Path('../data/speech+noise/')
    else:
        processed_folder = pathlib.Path('../data/processed/') / tech

    in_filename = exp_metadata['filename'][i]
    out_filename = exp_metadata['id'][i]
    # print(in_filename, out_filename)

    src = processed_folder / f'{in_filename}.wav'
    dst = exp_folder / f'{out_filename:03}.wav'
    shutil.copy2(str(src), str(dst))
    print(src, dst)

# exp_metadata = pd.DataFrame(index=[],columns=['filename', 'quality'])
# exp_metadata['filename'] = sorted(file_list)
# exp_metadata['quality'] = 0
# print(exp_metadata)

subject_metadata.to_csv(exp_folder / 'subject_metadata.csv', index=False)
UTF-8
Python
false
false
3,893
py
27
experiment.py
16
0.621885
0.612381
0
144
26.020833
75
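A sketch of how the two CSVs written by experiment.py could be joined once a listener has filled in the 'quality' column; the file names are the ones the script writes, while the aggregation itself is illustrative.

# Sketch: merge listener ratings back onto the experiment metadata via 'id'.
import pandas as pd

exp = pd.read_csv('exp_metadata.csv')
ratings = pd.read_csv('exp_data/subject_metadata.csv')

scored = exp.merge(ratings, on='id')
print(scored.groupby(['technique', 'SNR'])['quality'].mean())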
felipediel/django-commerce
463,856,474,394
6e1f1cadcaf9ca3939faeba100d65e749486a4f2
47045b7b7ef3c6f67bef89cbbc82a597773eb366
/commerce/views/cart.py
738193581c02f6c99871727bb34cb553ad31b954
[ "Apache-2.0" ]
permissive
https://github.com/felipediel/django-commerce
06fecdbd302b33c3cce4284ffc9fe9219a57672e
b992bf4c81ca6dfaad9ccd423d25fba9d255f159
refs/heads/master
2023-06-16T14:51:49.301650
2021-07-12T07:04:39
2021-07-12T07:04:39
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import DetailView, UpdateView

from commerce import settings as commerce_settings
from commerce.forms import AddressesForm, ShippingAndPaymentForm, DiscountCodeForm
from commerce.models import Cart, Order, PaymentMethod, Item, Option, ShippingOption
from commerce.templatetags.commerce import discount_for_product


class AddToCartView(LoginRequiredMixin, View):
    def get(self, request, *args, **kwargs):
        content_type = get_object_or_404(ContentType, id=kwargs['content_type_id'])
        product = get_object_or_404(content_type.model_class(), id=kwargs['object_id'])
        option = get_object_or_404(Option, slug_i18n=request.GET['option']) if 'option' in request.GET else None
        cart = Cart.get_for_user(request.user)

        # TODO: settings:
        # TODO: check if product can be added multiple times into cart
        # TODO: max items in cart
        ALLOW_MULTIPLE_SAME_ITEMS = False
        MAX_ITEMS = 3

        if cart.items_quantity >= MAX_ITEMS:
            messages.warning(request, _(f'You can order at most %d items at once') % MAX_ITEMS)
        else:
            if ALLOW_MULTIPLE_SAME_ITEMS or not cart.has_item(product, option):
                # add item into cart
                cart.add_item(product, option)

                # discount
                if cart.discount:
                    # remove discount if it is not valid anymore
                    if not cart.discount.is_valid:
                        cart.discount = None
                        cart.save(update_fields=['discount'])

                if not cart.discount:
                    # if no discount is applied yet, check if there is a valid discount available for product
                    self.apply_discount_by_product(cart, product)

                messages.info(request, _('%s was added into cart') % product)
            else:
                messages.warning(request, _('%s is already in cart') % product)

        back_url = request.GET.get('back_url', cart.get_absolute_url())
        return redirect(back_url)

    def apply_discount_by_product(self, cart, product):
        discount = discount_for_product({'request': self.request}, product)

        if discount and discount.add_to_cart:
            cart.discount = discount
            cart.save(update_fields=['discount'])


class UnapplyDiscountCartView(LoginRequiredMixin, View):
    def get(self, request, *args, **kwargs):
        cart = Cart.get_for_user(request.user)
        cart.discount = None
        cart.save(update_fields=['discount'])
        back_url = request.GET.get('back_url', cart.get_absolute_url())
        return redirect(back_url)


class RemoveFromCartView(LoginRequiredMixin, View):
    def get(self, request, *args, **kwargs):
        item = get_object_or_404(Item, id=kwargs['item_id'])
        cart = Cart.get_for_user(request.user)

        if item in cart.item_set.all():
            item.quantity -= 1
            item.save(update_fields=['quantity'])

            if item.quantity <= 0:
                item.delete()
                messages.info(request, _('%s removed from cart') % item)

            # discount
            if cart.discount:
                # remove discount if it is not valid anymore
                if not cart.discount.is_valid:
                    cart.discount = None
                    cart.save(update_fields=['discount'])

            # unset loyalty points
            if cart.subtotal < 0 < cart.loyalty_points:
                cart.update_loyalty_points()

            # delete empty cart
            if not cart.item_set.exists():
                cart.delete()

        back_url = request.GET.get('back_url', cart.get_absolute_url())
        return redirect(back_url)


class CartMixin(LoginRequiredMixin):
    model = Cart

    def get_object(self, queryset=None):
        return self.model.get_for_user(self.request.user)


class CartDetailView(CartMixin, UpdateView):
    form_class = DiscountCodeForm
    template_name = 'commerce/cart_detail.html'

    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        context_data.update({
            'loyalty_program_enabled': commerce_settings.LOYALTY_PROGRAM_ENABLED,
        })
        return context_data

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'user': self.request.user
        })
        return form_kwargs


class EmptyCartRedirectMixin(object):
    def dispatch(self, request, *args, **kwargs):
        cart = self.get_object()

        if cart.is_empty():
            return redirect(cart.get_absolute_url())

        return super().dispatch(request, *args, **kwargs)


class CheckoutAddressesView(CartMixin, EmptyCartRedirectMixin, UpdateView):
    template_name = 'commerce/checkout_form.html'
    form_class = AddressesForm

    def get_initial(self):
        initial = super().get_initial()
        user = self.object.user
        last_user_order = user.order_set.last()

        # TODO: refactor
        if last_user_order:
            initial.update({
                'delivery_name': self.object.delivery_name or last_user_order.delivery_name,
                'delivery_street': self.object.delivery_street or last_user_order.delivery_street,
                'delivery_postcode': self.object.delivery_postcode or last_user_order.delivery_postcode,
                'delivery_city': self.object.delivery_city or last_user_order.delivery_city,
                'delivery_country': self.object.delivery_country or last_user_order.delivery_country,
                'billing_name': self.object.billing_name or last_user_order.billing_name,
                'billing_street': self.object.billing_street or last_user_order.billing_street,
                'billing_postcode': self.object.billing_postcode or last_user_order.billing_postcode,
                'billing_city': self.object.billing_city or last_user_order.billing_city,
                'billing_country': self.object.billing_country or last_user_order.billing_country,
                'reg_id': self.object.reg_id or last_user_order.reg_id,
                'tax_id': self.object.tax_id or last_user_order.tax_id,
                'vat_id': self.object.vat_id or last_user_order.vat_id,
                'email': self.object.email or last_user_order.email,
                'phone': self.object.phone or last_user_order.phone,
            })
        else:
            initial.update({
                'delivery_name': self.object.delivery_name or user.get_full_name(),
                'delivery_street': self.object.delivery_street or user.street,
                'delivery_postcode': self.object.delivery_postcode or user.postcode,
                'delivery_city': self.object.delivery_city or user.city,
                'delivery_country': self.object.delivery_country or user.country,
                'billing_name': self.object.billing_name or user.get_full_name(),
                'billing_street': self.object.billing_street or user.street,
                'billing_postcode': self.object.billing_postcode or user.postcode,
                'billing_city': self.object.billing_city or user.city,
                'billing_country': self.object.billing_country or user.country,
                'email': self.object.email or user.email,
                'phone': self.object.phone or user.phone,
            })

        return initial

    def form_valid(self, form):
        form.save()
        return redirect('commerce:checkout_shipping_and_payment')


class CheckoutShippingAndPaymentView(CartMixin, EmptyCartRedirectMixin, UpdateView):
    template_name = 'commerce/checkout_form.html'
    form_class = ShippingAndPaymentForm

    def form_valid(self, form):
        form.save()
        return redirect('commerce:checkout_summary')

    def get_initial(self):
        initial = super().get_initial()

        shipping_options = ShippingOption.objects.for_country(self.object.delivery_country)

        if shipping_options.count() == 1:
            initial.update({
                'shipping_option': shipping_options.first()
            })

        payment_methods = PaymentMethod.objects.all()

        if payment_methods.count() == 1:
            initial.update({
                'payment_method': payment_methods.first()
            })

        return initial


class CheckoutSummaryView(CartMixin, EmptyCartRedirectMixin, DetailView):
    template_name = 'commerce/checkout_summary.html'

    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        context_data.update({
            'loyalty_program_enabled': commerce_settings.LOYALTY_PROGRAM_ENABLED,
        })
        return context_data


class CheckoutFinishView(CartMixin, DetailView):
    def get(self, request, *args, **kwargs):
        cart = self.get_object()

        if cart.can_be_finished():
            order_status = Order.STATUS_AWAITING_PAYMENT if cart.total > 0 else Order.STATUS_PENDING
            order = cart.to_order(status=order_status)

            if order.status != Order.STATUS_AWAITING_PAYMENT:
                return redirect(order.get_absolute_url())

            if not order.payment_method:
                messages.error(request, _('Missing payment method'))
                return redirect(order.get_absolute_url())

            if order.payment_method.method == PaymentMethod.METHOD_ONLINE_PAYMENT:
                return redirect(order.get_payment_url())

            return redirect(order.get_absolute_url())
        else:
            messages.warning(request, _('Checkout process can not be finished yet'))
            return redirect(cart.get_absolute_url())
UTF-8
Python
false
false
9,968
py
4
cart.py
3
0.626404
0.623997
0
248
39.193548
112
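A hypothetical urls.py wiring for the views above. The URL names 'checkout_shipping_and_payment' and 'checkout_summary' appear in the views' own redirects, and the keyword arguments match the kwargs the views read; the route paths themselves are assumptions.

# Sketch: URL configuration for the cart views (paths are illustrative).
from django.urls import path

from commerce.views.cart import (
    AddToCartView, RemoveFromCartView, UnapplyDiscountCartView, CartDetailView,
    CheckoutAddressesView, CheckoutShippingAndPaymentView,
    CheckoutSummaryView, CheckoutFinishView,
)

app_name = 'commerce'

urlpatterns = [
    path('cart/', CartDetailView.as_view(), name='cart'),
    path('cart/add/<int:content_type_id>/<int:object_id>/',
         AddToCartView.as_view(), name='add_to_cart'),
    path('cart/remove/<int:item_id>/',
         RemoveFromCartView.as_view(), name='remove_from_cart'),
    path('cart/unapply-discount/',
         UnapplyDiscountCartView.as_view(), name='unapply_discount'),
    path('checkout/addresses/',
         CheckoutAddressesView.as_view(), name='checkout_addresses'),
    path('checkout/shipping-and-payment/',
         CheckoutShippingAndPaymentView.as_view(),
         name='checkout_shipping_and_payment'),
    path('checkout/summary/',
         CheckoutSummaryView.as_view(), name='checkout_summary'),
    path('checkout/finish/',
         CheckoutFinishView.as_view(), name='checkout_finish'),
]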
abu-sayem/Data-Structures-Algorithms-And-Databases
5,085,241,310,017
b39f55f73025ed2ef605a8f93881610365a9b4d6
d78a742d4b4109c3af39216edfc3f8fb5706cc36
/leetcode/653. Two Sum IV - Input is a BST.py
91ea31c213d9121589b19fca7e7807b8ddbc6318
[]
no_license
https://github.com/abu-sayem/Data-Structures-Algorithms-And-Databases
d4a39278e1913e758f7515ad2e697d2253a92454
f593161d912b3521249f9cfd410655d6a5ce4355
refs/heads/master
2023-04-20T03:42:27.292639
2021-04-27T03:30:24
2021-04-27T03:30:24
98,731,558
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    def findTarget(self, root: TreeNode, k: int) -> bool:
        # Iterative DFS: for every visited value v, remember its
        # complement k - v; seeing a stored value means two nodes sum to k.
        store = set()
        stack = [root]
        while stack:
            temp = stack.pop()
            if temp.val in store:
                return True
            store.add(k - temp.val)
            if temp.left:
                stack.append(temp.left)
            if temp.right:
                stack.append(temp.right)
        return False
UTF-8
Python
false
false
581
py
33
653. Two Sum IV - Input is a BST.py
33
0.487091
0.487091
0
22
25.409091
57
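A small self-contained check of the solution above; the TreeNode class is written out here because LeetCode normally supplies it, and the tree is an arbitrary example.

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

#       5
#      / \
#     3   6
#    / \    \
#   2   4    7
root = TreeNode(5)
root.left = TreeNode(3)
root.right = TreeNode(6)
root.left.left = TreeNode(2)
root.left.right = TreeNode(4)
root.right.right = TreeNode(7)

print(Solution().findTarget(root, 9))   # True  (2 + 7)
print(Solution().findTarget(root, 28))  # False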
roger3/pom
15,092,515,123,551
d2f2fe7ad2efab7b7a647e20570772237b148b22
28359230e823d6dc6fbe53f607a12f1b30a74d9e
/pom/game/migrations/0028_auto_20160222_2227.py
01216c647f4a825a39d0ade3e7c2c8c82bd35b08
[]
no_license
https://github.com/roger3/pom
5bd9720adabe7f3338ce0a24756846eac0991bb9
f3e2227064fade140c24046b054a7f006adc90b3
refs/heads/master
2019-07-22T18:33:59.211289
2016-05-20T13:37:48
2016-05-20T13:37:48
55,376,032
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 22:27
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('game', '0027_auto_20160222_2215'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inhabitant',
            name='workplace',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='workplace', to='game.Workplace'),
        ),
    ]
UTF-8
Python
false
false
591
py
25
0028_auto_20160222_2227.py
20
0.64467
0.590525
0
21
27.142857
154
choandrew/Project-Euler-with-Python
4,183,298,162,528
ab4bc22415e3110fa4954f239c8863fe2917a7a9
233f22c397e78024cdff3d8c8006a829fba34659
/Project Euler 7.py
38148192e61c0505cfd46b35fc7173899d642f5b
[]
no_license
https://github.com/choandrew/Project-Euler-with-Python
96aff8daa57cf84aa8b9a904c0fbaa21ae63b91b
20f270869dff99d5b3ed651833240d38277b4ea2
refs/heads/master
2021-01-01T06:04:57.693954
2015-04-23T07:51:18
2015-04-23T07:51:18
31,481,912
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10 001st prime number? """ import math from datetime import datetime startTime = datetime.now() #to measure speed n = 10001 # int(input("What number prime do you want? ")) primes = [2] m = 3 while len(primes) < n: a = [] for divisors in range(2,math.floor(math.sqrt(m))+1): a.append(m % divisors != 0) if all(a) == True: primes.append(m) m += 2 print(primes[-1]) print(datetime.now()-startTime)
UTF-8
Python
false
false
555
py
15
Project Euler 7.py
14
0.625225
0.574775
0
29
18.137931
74
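A quick cross-check of the trial-division result, assuming SymPy is available; sympy.prime() is 1-indexed, so prime(6) == 13 as in the problem statement.

from sympy import prime

print(prime(10001))  # should match the answer printed above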
bschnitz/recipes
11,828,339,942,629
5f074f5d619648f5e382267a19cc6b7972c37625
efcecf1f695e371dfbbc5c58ac7ef7ad33366e90
/recipes/gui/form/recipe/instructions/instruction_section.py
c1b2d0b6e558a881a9410d7089317c64401d29b2
[ "MIT" ]
permissive
https://github.com/bschnitz/recipes
7d584376199f2a2e5c4dfa537106cdf2e3192292
8af348774a1edc11ccab3da9753bc456c19f2000
refs/heads/master
2022-05-22T21:39:35.309599
2022-03-22T08:22:01
2022-03-22T08:22:01
155,336,136
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import wx
from recipes.gui.fonts import Head2
from recipes.gui.form.framework import TitleBox
from recipes.gui.form.framework import PaddedBox
from recipes.gui.form.framework import AutoResizeMultilineText
from wx.lib.expando import ExpandoTextCtrl, EVT_ETC_LAYOUT_NEEDED


class InstructionSection:
    def __init__(self, parent):
        self.parent = parent
        title = 'Instructions for first meal'
        titled_box = TitleBox(self.form(parent), parent, title, Head2())
        parent.SetSizer(PaddedBox(titled_box))

    def form(self, parent):
        sizer = wx.GridBagSizer(10, 10)

        label = wx.StaticText(parent, label='Section Title')
        title_input = wx.TextCtrl(parent)
        sizer.Add(label, pos=(0, 0), flag=wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
        sizer.Add(title_input, pos=(0, 1), flag=wx.EXPAND)

        instructions_input = AutoResizeMultilineText(parent)
        sizer.Add(instructions_input, pos=(1, 0), span=(1, 2), flag=wx.EXPAND)

        sizer.AddGrowableCol(1)
        return sizer
UTF-8
Python
false
false
1,022
py
46
instruction_section.py
42
0.690802
0.676125
0
29
34.241379
80
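A minimal sketch of hosting InstructionSection inside a frame; the application scaffolding here is illustrative and not part of the recipes repository itself.

import wx

from recipes.gui.form.recipe.instructions.instruction_section import InstructionSection

app = wx.App()
frame = wx.Frame(None, title='Instruction section demo')
panel = wx.Panel(frame)

# The section installs its own sizer on the parent panel.
InstructionSection(panel)

frame.Show()
app.MainLoop()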
mhuijsmans/sandbox
11,201,274,708,525
83ef2f9b51371ca26614c716a95fd3df45fe7f9a
1e5e98034373ac7b58fe11f9b4a3bbb5e2d8acf2
/c++/scons_hierarchical2/components/comp4/test/SConscript
4d65b141c7127f3b8a456173aef0d108dc28e859
[]
no_license
https://github.com/mhuijsmans/sandbox
9718cb4414975502033abe791d2c2de747c5a04c
eb036140d91ea74af1b0215f5d5899ca070bd26c
refs/heads/master
2022-12-10T14:33:45.213736
2022-12-08T21:57:30
2022-12-08T21:57:30
44,774,226
1
1
null
false
2022-12-10T06:22:02
2015-10-22T21:26:43
2022-01-09T00:56:18
2022-12-10T06:22:02
6,809
0
1
142
Java
false
false
Import("env") opt = env.CreateClone('comp4.test') # COMP4 depends on COMP2 opt.BuildTests('comp4_tests', [ opt.Glob('*.cpp') ] )
UTF-8
Python
false
false
128
1,639
SConscript
1,364
0.679688
0.648438
0
4
31.25
53
swopnilnep/kattis
15,161,234,600,391
c01f59f5c233efbfb5bdbc73c779779a64814641
d5919f63f2a0f0f5758be0c9bdcc12b61a1bcd7d
/python3/countingstars/countingstars.py
123cffc730ece9c5b5465704355d2a9c47ff4c01
[ "MIT" ]
permissive
https://github.com/swopnilnep/kattis
e7f18f34c6be374aae89c2011155bc0d852d614e
8e41c83985137fad20e59416a3a0f9c3ed0ae847
refs/heads/master
2021-06-20T06:04:33.645790
2021-01-28T09:15:37
2021-01-28T09:15:37
167,257,410
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
from sys import setrecursionlimit, stdin

setrecursionlimit(10 ** 4)


def remove(items, x, y):
    # Flood-fill: clear this star and, recursively, its 4-connected
    # neighbours, so each group is counted exactly once.
    if items[x][y]:
        items[x][y] = False
        for i, j in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            x_mod, y_mod = x + i, y + j
            if not any((x_mod < 0,
                        y_mod < 0,
                        x_mod >= len(items),
                        y_mod >= len(items[0]))):
                items = remove(items, x_mod, y_mod)
    return items


for row_num, meta in enumerate(stdin):
    x_axis, y_axis = map(int, meta.split())
    is_star = [[False for x in range(y_axis)] for x in range(x_axis)]
    for l in range(x_axis):
        line = stdin.readline()
        for star in range(y_axis):
            if line[star] == "-":
                is_star[l][star] = True

    count = 0
    not_visited = list(range(len(is_star)))
    while len(not_visited) > 0:
        for x in not_visited:
            line = is_star[x]
            if True in line:
                y = line.index(True)
                is_star = remove(is_star, x, y)
                count += 1
            else:
                not_visited.remove(x)
                continue
    print(f"Case {row_num + 1}: {count}")
UTF-8
Python
false
false
1,156
py
62
countingstars.py
31
0.479239
0.463668
0
35
32.057143
77
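A tiny example run, with the input format inferred from the code itself (grid height and width on the first line, then the sky map, where '-' marks a star pixel); the grid is illustrative.

Input:
2 3
-#-
-##

Output:
Case 1: 2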
ZakHussain/Python_OOP
15,693,810,502,465
a649a2d9e0246762c6971d7a8a9554946912cccb
d517bd40bfe43938dea47b0a815d8d3660e4609e
/Building_Objects_OOP.py
63d3e8de1f9ebc5eb0507391f8f06c67426df9b4
[]
no_license
https://github.com/ZakHussain/Python_OOP
6e943a9d31ad333accaeb353e06bd0d0090bbeb7
953285985836b8e05fef8b6178d0541dfc066fbb
refs/heads/master
2021-01-22T07:27:18.653986
2017-02-13T11:15:28
2017-02-13T11:15:28
81,816,207
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# The Goal of this program is to get practice with Object Oriented Programming (OOP).
# Here we create the 'blueprints' for 4 kinds of objects - Cat, Human, Bike, and Car.

# The Cat object requires a color, type, and age parameter to create an instance of a 'Cat'.
class Cat(object):
    def __init__(self, color, type, age):
        self.color = color
        self.type = type
        self.age = age

# The Human object requires no user input, and will print "New Human" once initialized.
class Human(object):
    def __init__(self, clan=None):
        print "New Human!!!"

# The Bike object has an initialization that takes form, price, max_speed, and miles as input.
# It has a displayinfo(), ride(), and reverse() method to allow for interaction with the object.
class Bike(object):
    def __init__(self, form, price, max_speed, miles):
        self.form = form
        self.price = price
        self.max_speed = max_speed
        self.miles = 0

    def displayinfo(self):
        print self.form
        print "Price for this is $" + str(self.price)
        print "Top speeds for this is "+ str(self.max_speed)+ 'mph'
        print "Total miles " + str(self.miles) + " miles "

    def ride(self):
        print 'driving'
        self.miles += 10

    def reverse(self):
        print 'Reversing'
        if self.miles >= 5:
            self.miles -= 5

# The Car object is initialized with four attributes: price, speed, fuel, and mileage.
# It also contains a display_all method that will show the information about the state of the car.
class Car(object):
    def __init__(self, price, speed, fuel, mileage):
        self.price = price
        self.speed = speed
        self.fuel = fuel
        self.mileage = mileage
        self.tax = .15
        if self.price <= 10000:
            self.tax = 0.12

    def display_all(self):
        print 'the price is ', str(self.price)
        print 'the speed of this car is ', str(self.speed)
        print 'the current tank is ', str(self.fuel)
        print 'the present mileage is ', str(self.mileage)
        print self.tax

# here, we create an instance of a Cat that we named 'garfield.' This
# instance now has all the attributes that make a 'Cat' (color, type, age, etc.)
garfield = Cat('orange', 'fat', 5)
print garfield.color
print garfield.type
print garfield.age

# these following calls create instances of the 'Bike' object
locomotion = Bike('\n locomotion', 100, 200 , 0)
tricycle = Bike('\n trycyle', 2,20, 0)
motorcycle = Bike('\n motorcycle', 1000, 200, 0)

# the following object.method() calls invoke the methods of the specific object
locomotion.ride()
locomotion.ride()
locomotion.ride()
locomotion.reverse()
locomotion.displayinfo()

tricycle.ride()
tricycle.ride()
tricycle.reverse()
tricycle.reverse()
tricycle.displayinfo()

motorcycle.reverse()
motorcycle.reverse()
motorcycle.reverse()
motorcycle.displayinfo()

# the following object 'Kirby' is an instance of a 'Car' object
Kirby1 = Car(10000, 35, 'Full', 15)

# the following print statements call on the fields of the Kirby1 Car object
print Kirby1.speed
print Kirby1.fuel
print Kirby1.mileage
print Kirby1.price
print Kirby1.tax
UTF-8
Python
false
false
3,015
py
3
Building_Objects_OOP.py
2
0.700166
0.68325
0
100
28.84
96
andreas-ibm/mqtt-bridge-websockets-python
6,674,379,220,142
e9197f9b46df068933ff379fea588179b843f103
fa4fe37bd76285ed0cbc1216908c22f43b1af427
/app.py
1ca3b2dea51da2e6e8552b381087e4410541095e
[]
no_license
https://github.com/andreas-ibm/mqtt-bridge-websockets-python
80c62d7805c81733135ba76a7ad3a71f61b84c59
362ab7654e9222c8ef54991c9a96b0433690305c
refs/heads/master
2023-05-15T05:58:22.138023
2023-05-02T09:40:58
2023-05-02T09:40:58
236,773,791
0
1
null
false
2023-05-02T09:41:00
2020-01-28T15:53:18
2020-01-28T15:53:49
2023-05-02T09:40:59
2
0
0
0
Python
false
false
# MQTT Standalone bridge for sending data using WebSockets
import argparse
import paho.mqtt.client as paho
import time
import threading
import uuid

from flask import Flask

parser = argparse.ArgumentParser()
parser.add_argument("-s","--sourcebroker", help="The hostname of the broker to subscribe to")
parser.add_argument("-p","--sourceport", type=int, help="The port of the broker to subscribe to", default=1883)
parser.add_argument("-d","--targetbroker", help="The hostname of the broker to publish to")
parser.add_argument("-o","--targetport", type=int, help="The port of the broker to publish to", default=9001)
parser.add_argument("-e","--endpoint",help="The endpoint to register the edge broker as, defaults to ws://<sourcebroker>:9001")
parser.add_argument("-t","--topic", help="The topic to bridge", default='#')
parser.add_argument("-v","--verbose", help="Be verbose about relaying messages", action="store_true")
args = parser.parse_args()

app = Flask(__name__)
app.debug = False

@app.route('/')
def hello():
    # use the module-level args here; 'arguments' only exists inside main()
    return "Bridge config: Bridging {} from {}({}) to {}({})".format(args.topic, args.sourcebroker, args.sourceport, args.targetbroker, args.targetport)

def on_subscribe(client, userdata, mid, granted_qos):   # create function for callback
    print("subscribed with qos",granted_qos, "\n")
    pass

def on_target_message(client, userdata, message):
    ## we didn't really expect to receive anything here...
    print("message received from target " ,str(message.payload.decode("utf-8")))

def on_publish(client,userdata,mid):   # create function for callback
    if args.verbose:
        print("data published mid=",mid, "\n")
    pass

def on_disconnect(client, userdata, rc):
    print("client disconnected ok")

def on_source_connect(client, userdata, flags, rc):
    print("Connected to source broker with rc={}".format(rc))
    print(" subscribing to ",args.topic)
    client.subscribe(args.topic)

def main(arguments):
    threading.Thread(target=app.run).start()
    print("Bridge config: Bridging {} from {}({}) to {}({})".format(arguments.topic, arguments.sourcebroker, arguments.sourceport, arguments.targetbroker, arguments.targetport))

    # the function that will do the actual bridging
    def on_source_message(client, userdata, message):
        ## this needs to pass on to the target
        if arguments.verbose:
            print("message received from source:\n\t{}\n\t{}".format(message.topic,str(message.payload.decode("utf-8"))))
        publisher.publish(message.topic, message.payload)

    # connect to the source broker
    client_id = str(uuid.uuid4().fields[-1])[:5]
    subscriber = paho.Client("mqtt-bridge-source-"+client_id)
    # we want to pass a lot of messages around
    subscriber.max_inflight_messages_set(300)
    # Use callback functions, pass them in
    subscriber.on_subscribe = on_subscribe
    subscriber.on_publish = on_publish
    subscriber.on_message = on_source_message
    subscriber.on_disconnect = on_disconnect
    subscriber.on_connect = on_source_connect
    print("connecting to broker ",arguments.sourcebroker,"on port ",arguments.sourceport)
    subscriber.connect(arguments.sourcebroker,arguments.sourceport)

    # connect to the target broker
    will = ""
    topic_will = "mqtt/edge"
    publisher = paho.Client("mqtt-bridge-target-"+client_id,transport='websockets')
    publisher.will_set(topic_will, payload=will, qos=0, retain=False)
    publisher.max_inflight_messages_set(300)
    # use callback functions, some are the same as the source broker.
    publisher.on_subscribe = on_subscribe
    publisher.on_publish = on_publish
    publisher.on_message = on_target_message
    publisher.on_disconnect = on_disconnect
    print("connecting to broker ",arguments.targetbroker,"on port ",arguments.targetport)
    publisher.connect(arguments.targetbroker,arguments.targetport)

    # Tell the broker that there is now an edge broker it's getting data from
    endpoint = "ws://"+arguments.sourcebroker+":9001"
    if arguments.endpoint is not None:
        endpoint = arguments.endpoint
    print("publishing edge endpoint "+endpoint+" to broker")
    publisher.publish(topic_will, endpoint, retain=True)

    publisher.loop_start()
    # keep going forever!
    subscriber.loop_forever()

if __name__ == "__main__":
    main(args)
UTF-8
Python
false
false
4,318
py
3
app.py
1
0.714451
0.707967
0
103
40.854369
176
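An illustrative invocation of the bridge, using only the flags defined by the script's own argparse setup; the hostnames are placeholders.

python app.py \
    --sourcebroker edge-broker.local --sourceport 1883 \
    --targetbroker cloud-broker.example.com --targetport 9001 \
    --topic 'sensors/#' --verbose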
GraphicalDot/Assignments
13,116,830,124,943
bd7c2ae7710ea045235c97557a0b59128677f464
c904a0066c22f54c5bdb747bd822e8a522d94808
/assignment_1_nov_2014.py
c3d4c047cc4f455c5cab4071eb97c7324ad9dcb8
[]
no_license
https://github.com/GraphicalDot/Assignments
c273f3cf66f2012583e0fc4552f85d914fefaec9
9632f9fa1284a00001fad672a2456e3f67379012
refs/heads/master
2021-05-28T07:12:04.582583
2015-01-13T08:26:03
2015-01-13T08:26:03
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python


def Upto_you():
    """
    Write the ten fanciest if/else statements you can; it's up to you
    """
    pass


def sort_list():
    """
    1. Prepare a list of all alphabets
    2. Prepare a list by shuffling them and joining them without spaces
    3. From this list which will have 20 elements, prepare a list of dictionaries
       with keys as 1 to 20 and values as the above mentioned randomly joined alphabets.
    4. Sort this list in descending order according to the value
    Hint: Output is like [{1: 'afmeniyhqvkdxrlocswgjpbtu'}, {2: 'jdtprombhueifnygskvclwxqa'}, ...and so on]
    """
    pass


def another_sort_list(shivam_dict):
    """
    From the output of sort_list()
    Step 2: Update every dictionary present in the list by removing the last three
    elements from each value
    Hint: [{1: 'afmeniyhqvkdxrlocswgjp'}, {2: 'jdtprombhueifnygskvclw'}, ...and so on]

    {'a': 1, 'c': 3, 'b': 2, 'e': 5, 'd': 4, 'g': 7, 'f': 6, 'i': 9, 'h': 8,
     'k': 11, 'j': 10, 'm': 13, 'l': 12, 'o': 15, 'n': 14, 'q': 17, 'p': 16,
     's': 19, 'r': 18, 'u': 21, 't': 20, 'w': 23, 'v': 22, 'y': 25, 'x': 24, 'z': 26}

    Then sum the values according to the above mentioned dictionary.
    The above mentioned dictionary is just for your reference; don't copy that,
    make your own code.
    Hint: Output is like [{1: 'afmeniyhqvkdxrlocswgjpbtu', "sum": "282"},
    {2: 'jdtprombhueifnygskvclw', "sum": "283"}, ...and so on]
    """
    # shivam_dict is the list of dictionaries produced by sort_list();
    # in the original it was referenced without being defined, so it is
    # taken as a parameter here.
    new_dict = dict()
    # Preparing the mapping like {'a': 1, 'b': 2, 'c': 3, ...and so on}
    [new_dict.update({element[1]: element[0]}) for element in zip(range(1, 27), map(chr, range(ord("a"), ord("z") + 1)))]
    h = lambda x: sum([new_dict[element] for element in x])
    [element.update({"sum": h(element.values()[0])}) for element in shivam_dict]
    return shivam_dict


def lambda__():
    """
    Should return a list which does not contain the prime numbers up to 1000
    """
    pass


def and_or():
    """
    Returns a list which will not have any number divisible by [18, 19, 21, 99, 45].
    The original list will have 1..10000
    """
    pass


def exception_handling():
    """
    Handle an exception.
    After handling this exception, raise your own exception which should only
    print the error message as the output
    """
    pass


def open_file(file_path):
    """
    Print the contents of the file lying at file_path.
    Now open a new file in your home directory, write something in that file,
    and then mv that file into this current directory.
    Hint: os or subprocess module
    """


def convert_string_to_list(string):
    """
    Convert this string into a list:
    string = "HEY_MAN_WHAT_the_fuck_is_going_on"
    output = ["Hey", "man", "what", "the", "fuck", "is", "going", "on"]
    Then convert this list into the string "hey man what the fuck is going on".
    Convert this string into the string "hey man, everything is great".
    Everything you do shall be done in one line with the help of list comprehensions
    """
    pass


if __name__ == "__main__":
    pass
UTF-8
Python
false
false
2,970
py
4
assignment_1_nov_2014.py
4
0.643771
0.610101
0
120
23.708333
140
tedhtchang/kfserving
16,389,595,216,730
3e53452c7d94319102fe8b3d9bf10cca9ee1cce9
7d476ec8de08ccdc4e986faefe0512b205c0d219
/python/kserve/kserve/storage/test/test_s3_storage.py
c52d52f97a4bfd501d7d97569dfb62c08ea85b73
[ "Apache-2.0" ]
permissive
https://github.com/tedhtchang/kfserving
67912db2e7e39805e2048e277b3771b156bfd679
f2f0717a9d6341b6ec9b939bdd324b2c8c507551
refs/heads/master
2023-08-17T22:25:43.120863
2023-08-06T21:36:10
2023-08-06T21:36:10
303,819,073
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest.mock as mock

from botocore.client import Config
from botocore import UNSIGNED

from kserve.storage import Storage

STORAGE_MODULE = 'kserve.storage.storage'


def create_mock_obj(path):
    mock_obj = mock.MagicMock()
    mock_obj.key = path
    mock_obj.is_dir = False
    return mock_obj


def create_mock_boto3_bucket(mock_storage, paths):
    mock_s3_resource = mock.MagicMock()
    mock_s3_bucket = mock.MagicMock()
    mock_s3_bucket.objects.filter.return_value = [create_mock_obj(p) for p in paths]

    mock_s3_resource.Bucket.return_value = mock_s3_bucket
    mock_storage.resource.return_value = mock_s3_resource

    return mock_s3_bucket


def get_call_args(call_args_list):
    arg_list = []
    for call in call_args_list:
        args, _ = call
        arg_list.append(args)
    return arg_list


def expected_call_args_list_single_obj(dest, path):
    return [(f'{path}'.strip('/'),
             f'{dest}/{path.rsplit("/", 1)[-1]}'.strip('/'))]


def expected_call_args_list(parent_key, dest, paths):
    return [(f'{parent_key}/{p}'.strip('/'), f'{dest}/{p}'.strip('/'))
            for p in paths]


# pylint: disable=protected-access

@mock.patch(STORAGE_MODULE + '.boto3')
def test_parent_key(mock_storage):

    # given
    bucket_name = 'foo'
    paths = ['models/weights.pt', '0002.h5', 'a/very/long/path/config.json']
    object_paths = ['bar/' + p for p in paths]

    # when
    mock_boto3_bucket = create_mock_boto3_bucket(mock_storage, object_paths)
    Storage._download_s3(f's3://{bucket_name}/bar', 'dest_path')

    # then
    arg_list = get_call_args(mock_boto3_bucket.download_file.call_args_list)
    assert arg_list == expected_call_args_list('bar', 'dest_path', paths)

    mock_boto3_bucket.objects.filter.assert_called_with(Prefix='bar')


@mock.patch(STORAGE_MODULE + '.boto3')
def test_no_key(mock_storage):

    # given
    bucket_name = 'foo'
    object_paths = ['models/weights.pt', '0002.h5', 'a/very/long/path/config.json']

    # when
    mock_boto3_bucket = create_mock_boto3_bucket(mock_storage, object_paths)
    Storage._download_s3(f's3://{bucket_name}/', 'dest_path')

    # then
    arg_list = get_call_args(mock_boto3_bucket.download_file.call_args_list)
    assert arg_list == expected_call_args_list('', 'dest_path', object_paths)

    mock_boto3_bucket.objects.filter.assert_called_with(Prefix='')


@mock.patch(STORAGE_MODULE + '.boto3')
def test_full_name_key(mock_storage):

    # given
    bucket_name = 'foo'
    object_key = 'path/to/model/name.pt'

    # when
    mock_boto3_bucket = create_mock_boto3_bucket(mock_storage, [object_key])
    Storage._download_s3(f's3://{bucket_name}/{object_key}', 'dest_path')

    # then
    arg_list = get_call_args(mock_boto3_bucket.download_file.call_args_list)
    assert arg_list == expected_call_args_list_single_obj('dest_path', object_key)

    mock_boto3_bucket.objects.filter.assert_called_with(Prefix=object_key)


@mock.patch(STORAGE_MODULE + '.boto3')
def test_full_name_key_root_bucket_dir(mock_storage):

    # given
    bucket_name = 'foo'
    object_key = 'name.pt'

    # when
    mock_boto3_bucket = create_mock_boto3_bucket(mock_storage, [object_key])
    Storage._download_s3(f's3://{bucket_name}/{object_key}', 'dest_path')

    # then
    arg_list = get_call_args(mock_boto3_bucket.download_file.call_args_list)
    assert arg_list == expected_call_args_list_single_obj('dest_path', object_key)

    mock_boto3_bucket.objects.filter.assert_called_with(Prefix=object_key)


AWS_TEST_CREDENTIALS = {"AWS_ACCESS_KEY_ID": "testing",
                        "AWS_SECRET_ACCESS_KEY": "testing",
                        "AWS_SECURITY_TOKEN": "testing",
                        "AWS_SESSION_TOKEN": "testing"}


def test_get_S3_config():
    DEFAULT_CONFIG = Config()
    ANON_CONFIG = Config(signature_version=UNSIGNED)
    VIRTUAL_CONFIG = Config(s3={"addressing_style": "virtual"})

    with mock.patch.dict(os.environ, {}):
        config1 = Storage.get_S3_config()
    assert vars(config1) == vars(DEFAULT_CONFIG)

    with mock.patch.dict(os.environ, {"awsAnonymousCredential": "False"}):
        config2 = Storage.get_S3_config()
    assert vars(config2) == vars(DEFAULT_CONFIG)

    with mock.patch.dict(os.environ, AWS_TEST_CREDENTIALS):
        config3 = Storage.get_S3_config()
    assert vars(config3) == vars(DEFAULT_CONFIG)

    with mock.patch.dict(os.environ, {"awsAnonymousCredential": "True"}):
        config4 = Storage.get_S3_config()
    assert config4.signature_version == ANON_CONFIG.signature_version

    # assuming Python 3.5 or greater for joining dictionaries
    credentials_and_anon = {**AWS_TEST_CREDENTIALS, "awsAnonymousCredential": "True"}
    with mock.patch.dict(os.environ, credentials_and_anon):
        config5 = Storage.get_S3_config()
    assert config5.signature_version == ANON_CONFIG.signature_version

    with mock.patch.dict(os.environ, {"S3_USER_VIRTUAL_BUCKET": "False"}):
        config6 = Storage.get_S3_config()
    assert vars(config6) == vars(DEFAULT_CONFIG)

    with mock.patch.dict(os.environ, {"S3_USER_VIRTUAL_BUCKET": "True"}):
        config7 = Storage.get_S3_config()
    assert config7.s3["addressing_style"] == VIRTUAL_CONFIG.s3["addressing_style"]
UTF-8
Python
false
false
5,955
py
224
test_s3_storage.py
81
0.660118
0.645844
0
176
32.835227
85
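The functions above are plain pytest tests; assuming a checkout of the repository with the kserve package installed, they can be run directly using the path from the record above.

pytest python/kserve/kserve/storage/test/test_s3_storage.py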
samd-a/ebshare2.0
7,078,106,141,143
abb6d33646fd1d11d1ae35b73c0824866dd1945b
9d41222e8e2359d53b3f776a941675f9056cea90
/books/models.py
3ee18ec00be525814ef033c40655a31dac5de396
[]
no_license
https://github.com/samd-a/ebshare2.0
40b0a3ecc6e70a1a8f1fbe0653fd7305dabf11bd
ffa4ed4f107281095c796756711b6f57285da8b2
refs/heads/master
2021-01-10T04:00:26.210803
2015-12-11T11:09:55
2015-12-11T11:09:55
47,309,176
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models
from django.contrib.auth.models import User

# Create your models here.


class book(models.Model):
    user = models.ForeignKey(User)
    book_title = models.CharField(max_length=100)
    book_author = models.CharField(max_length=60)
    cover = models.ImageField(upload_to='book_cover')
    alt_text = models.CharField(max_length=20)
    description = models.TextField(max_length=750)
    details = models.CharField(max_length=200)
    genre = models.CharField(max_length=20)

    # ideally, these would be 1 non-array field with the paragraph text
    # current error: "need more than 1 value to unpack"
    # description = ArrayField(models.CharField(max_length=500))
    # details = ArrayField(models.CharField(max_length=200))

    def __unicode__(self):
        return self.book_title


# class txtbook(book):
#     fixtures = ['books.json']


class review(models.Model):
    user = models.ForeignKey(User)
    book_review = models.ForeignKey(book)
    content = models.CharField(max_length=750)

    def __unicode__(self):
        return str(self.id)
UTF-8
Python
false
false
1,080
py
12
models.py
8
0.700926
0.676852
0
35
29.885714
67
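The commented-out ArrayField lines above fail without the PostgreSQL import; a sketch of how they could be wired up follows. ArrayField is PostgreSQL-only and takes an instance of the base field, and the model name here is hypothetical.

# Sketch: PostgreSQL array fields for the paragraph text (requires
# django.contrib.postgres and a PostgreSQL database backend).
from django.contrib.postgres.fields import ArrayField
from django.db import models


class book_with_arrays(models.Model):  # hypothetical model name
    description = ArrayField(models.CharField(max_length=500))
    details = ArrayField(models.CharField(max_length=200))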
zrq495/OnlineJudge
11,235,634,463,095
dca16921fdcbeac0b8272a7ccc61e6ebc515ce80
9bd23c46e3f594d9557e3c049f753b05adff2b94
/oj/core/jinja.py
fee18fd552567518e3859284c884da84db8c0123
[]
no_license
https://github.com/zrq495/OnlineJudge
26a5f865734c306f521b922ecf2c46e74d6fe905
44be892ed657f462fb441d785c8550fc144f8896
refs/heads/master
2021-01-19T22:05:27.492272
2015-11-20T09:00:40
2015-11-20T09:00:40
31,713,677
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

from datetime import datetime, timedelta

from jinja2 import Markup
import pygments
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import guess_lexer

from oj import app


def highlight(code):
    try:
        lexer = guess_lexer(code)
        formatter = HtmlFormatter(linenos='table', linenostart=0)
        code = pygments.highlight(code, lexer, formatter)
    except:
        pass
    return Markup(code)


def digital_to_letter(value, base='A'):
    try:
        return chr(value % 26 + ord(base))
    except:
        return ''


def time_since(dt, default='刚刚', time_format='%Y-%m-%d %H:%M'):
    """Jinja filter that renders a datetime as a relative string
    ('3 hours ago', '2 days ago' and so on)

    copy from https://github.com/tonyblundell/socialdump/blob/master/socialdump.py

    sqlite's CURRENT_TIMESTAMP can only use UTC, so in the unit tests the
    times appear to be 8 hours in the past; don't panic, PostgreSQL
    does have time zone support.
    """
    # added by jade
    if not dt:
        return ''
    now = datetime.now()
    diff = now - dt
    total_seconds = diff.total_seconds()

    if total_seconds > 0:
        if total_seconds < 10800:  # within 3 hours
            periods = (
                (diff.seconds / 3600, '小时'),
                (diff.seconds / 60, '分钟'),
                (diff.seconds, '秒'),
            )
            for period, unit in periods:
                if period > 0:
                    return '%d%s前' % (period, unit)
        elif total_seconds < 86400 and dt.day == now.day:  # strictly today
            return '今天' + dt.strftime('%H:%M')
        elif (total_seconds < 2 * 86400
                and dt.day == (now - timedelta(days=1)).day):  # strictly yesterday
            return '昨天' + dt.strftime('%H:%M')
        else:
            return unicode(dt.strftime(time_format))
    return default


def convert_timedelta_to_hms(duration):
    seconds = int(duration.total_seconds())
    sign = '-' if seconds < 0 else ''
    seconds = abs(seconds)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = (seconds % 60)
    return sign + str(hours), str(minutes), str(seconds)


JINJA_FILTERS = {
    'digital_to_letter': digital_to_letter,
    'time_since': time_since,
    'highlight': highlight,
    'convert_timedelta_to_hms': convert_timedelta_to_hms,
}


@app.template_global()
def get_headlines():
    from oj.models import HeadlineModel
    return HeadlineModel.query.filter(
        HeadlineModel.is_display.is_(True)).all()
UTF-8
Python
false
false
2,596
py
134
jinja.py
93
0.596906
0.577769
0
87
27.229885
72
hellomeeddie/flask_saverly_api
15,814,069,603,872
8c3aa6f042990f70ef3a58c4f82812c26fdcc304
07c51b31eb3a70189ac1be3f9f0b2cef140068cc
/dan/insert.py
3678d1eb2ec2b2061d89e213ff7aff2daa1fd2d0
[]
no_license
https://github.com/hellomeeddie/flask_saverly_api
4433682788c71a10e70147a22ac719cb282962f7
0a3a11c76d517a9ccb9dd7e2f237cfebb161330c
refs/heads/master
2020-12-28T22:02:02.369035
2016-07-19T02:45:41
2016-07-19T02:45:41
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#Daniel Engelberth
from py2neo import authenticate, Graph, Node, Relationship

#Params: t - String, type of node being inserted (User, Goal, Budget, etc.), info - Array, array of all other info to be used to set up node properties
#Returns: None
#Description: Inserts a new node into the Neo4j database that corresponds with the requested data type as well as any required relationships
def insert(graph, t, info):
    if t == "User":
        graph.run("CREATE (n:User {userName: {userName},firstName: {firstName},lastName: {lastName},email: {email},password: {password}})",userName=info[0],firstName=info[1],lastName=info[2],email=info[3],password=info[4])
    elif t == "Goal":
        graph.run("CREATE (n:Goal {name: {name},amount: {amount},downpay: {downpay},term: {term},description: {description}})",name=info[0],amount=info[1],downpay=info[2],term=info[3],description=info[4])
        for category in info[5]:
            insert(graph, "Category", category)
            graph.run("MATCH (a:Goal),(b:Category) WHERE a.name = {name} AND b.name = {category} CREATE (a)-[r:HAS]->(b)",name=info[0],category=category)
        # the owner's userName follows the category list, mirroring the Budget branch
        graph.run("MATCH (a:User),(b:Goal) WHERE a.userName = {userName} AND b.name = {name} CREATE (a)-[r:HAS]->(b)",userName=info[6],name=info[0])
    elif t == "Budget":
        graph.run("CREATE (n:Budget {name: {name},amount: {amount},startDate: {startDate},endDate: {endDate},description: {description}})",name=info[0],amount=info[1],startDate=info[2],endDate=info[3],description=info[4])
        for category in info[5]:
            insert(graph, "Category", category)
            graph.run("MATCH (a:Budget),(b:Category) WHERE a.name = {name} AND b.name = {category} CREATE (a)-[r:HAS]->(b)",name=info[0],category=category)
        graph.run("MATCH (a:User),(b:Budget) WHERE a.userName = {userName} AND b.name = {name} CREATE (a)-[r:HAS]->(b)",userName=info[6],name=info[0])
    elif t == "Wish":
        graph.run("CREATE (n:Wish {name: {name},purchaseLink: {purchaseLink},date: {date},description: {description}})",name=info[0],purchaseLink=info[1],date=info[2],description=info[3])
        for category in info[4]:
            insert(graph, "Category", category)
            graph.run("MATCH (a:Wish),(b:Category) WHERE a.name = {name} AND b.name = {category} CREATE (a)-[r:HAS]->(b)",name=info[0],category=category)
        graph.run("MATCH (a:User),(b:Wish) WHERE a.userName = {userName} AND b.name = {name} CREATE (a)-[r:HAS]->(b)",userName=info[5],name=info[0])
    elif t == "Transaction":
        graph.run("CREATE (n:Transaction{name: {name},amount: {amount},date: {date},location: {location}})",name=info[0],amount=info[1],date=info[2],location=info[3])
        for category in info[4]:
            insert(graph, "Category", category)
            graph.run("MATCH (a:Transaction),(b:Category) WHERE a.name = {name} AND b.name = {category} CREATE (a)-[r:HAS]->(b)",name=info[0],category=category)
        insert(graph, "Merchant", info[5])
        graph.run("MATCH (a:Transaction),(b:Merchant) WHERE a.name = {name} AND b.name = {merchantName} CREATE (a)-[r:HAS]->(b)",name=info[0],merchantName=info[5])
        graph.run("MATCH (a:User),(b:Transaction) WHERE a.userName = {userName} AND b.name = {name} CREATE (a)-[r:HAS]->(b)",userName=info[7],name=info[0])
    elif t == "Bill":
        graph.run("CREATE (n:Bill {name: {name},amount: {amount},startDate: {startDate},endDate: {endDate},description: {description},freq: {freq}})",name=info[0],amount=info[1],startDate=info[2],endDate=info[3],description=info[4],freq=info[5])
        for category in info[6]:
            insert(graph, "Category", category)
            graph.run("MATCH (a:Bill),(b:Category) WHERE a.name = {name} AND b.name = {category} CREATE (a)-[r:HAS]->(b)",name=info[0],category=category)
        graph.run("MATCH (a:User),(b:Bill) WHERE a.userName = {userName} AND b.name = {name} CREATE (a)-[r:HAS]->(b)",userName=info[7],name=info[0])
    #elif t == "Tag":
    #    graph.run("MERGE (t:Tag { name: {name} })",name=info)
    elif t == "Merchant":
        graph.run("MERGE (t:Merchant { name: {name} })",name=info)
    elif t == "Category":
        graph.run("MERGE (c:Category {name: {name}})",name=info)
    else:
        raise ValueError("t must be a type of node {'User','Goal','Transaction', etc.}")
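# --- Added usage sketch (not part of the original file). The connection URI
# and field values below are placeholders; Graph comes from the py2neo import
# at the top of this module.
#
# graph = Graph("http://localhost:7474/db/data/")
# insert(graph, "User", ["jdoe", "Jane", "Doe", "jane@example.com", "secret"])
# insert(graph, "Merchant", "Corner Store")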
UTF-8
Python
false
false
4,372
py
3
insert.py
3
0.629918
0.617566
0
50
85.48
245
Nikkuniku/AtcoderProgramming
9,826,885,179,047
550e6f1f75bc4e863673a2cdfa8a915679b2bfd2
63b0fed007d152fe5e96640b844081c07ca20a11
/くじかつ/よるかつ50/C.py
999de7c70eed8d6057ad3477f79c698c5172ce11
[]
no_license
https://github.com/Nikkuniku/AtcoderProgramming
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
fbaf7b40084c52e35c803b6b03346f2a06fb5367
refs/heads/master
2023-08-21T10:20:43.520468
2023-08-12T09:53:07
2023-08-12T09:53:07
254,373,698
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
n = int(input())
h = list(map(int, input().split()))
h_min = min(h)
# lower every height by the minimum so the smallest entry becomes 0
for i in range(n):
    h[i] -= h_min
# print the levelled list once
print(h)
UTF-8
Python
false
false
155
py
2,078
C.py
2,067
0.509677
0.503226
0
13
10.769231
32
49ers-DB/Atlanta-Movie
19,542,101,197,174
d299880c948280a90a990cc98d0319ca15f61979
33ef7d427278c371b101b047459d7c669d5d8eff
/app/http/api/endpoints.py
0ae14ab032fd9d54570f94d7c158e7234d37e321
[ "MIT" ]
permissive
https://github.com/49ers-DB/Atlanta-Movie
ac808f7c8f07ca2cc3f28b7f2af877bf7647af74
1cff0760ba8b57831dd87f9d216b7b3ae4cac6e2
refs/heads/master
2021-07-08T05:09:01.582449
2019-11-30T04:06:46
2019-11-30T04:06:46
221,286,389
2
0
MIT
false
2021-01-05T17:19:22
2019-11-12T18:37:12
2020-01-10T01:05:52
2021-01-05T17:19:20
799
1
0
22
JavaScript
false
false
from middleware import login_required, admin_only
from flask import Flask, json, g, request, render_template
from flask_cors import CORS
import pymysql.cursors

from app.util.custom_jwt import create_access_token
from app.services.LoginService import LoginService
from app.services.ManagerService import ManagerService
from app.services.RegisterService import RegisterService
from app.services.DropDownService import DropDownService
from app.services.UserService import UserService
from app.services.CustomerService import CustomerService
from app.services.AdminService import AdminService
from app.services.DBService import db_reset

from logging.config import dictConfig
import logging
import sys

dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default'
    }},
    'root': {
        'level': 'DEBUG',
        'handlers': ['wsgi']
    }
})

logging.basicConfig(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
root = logging.getLogger()
root.addHandler(handler)

app = Flask(__name__, static_folder="build/static", template_folder="build")
CORS(app)
app.config['EXPLAIN_TEMPLATE_LOADING'] = True

db_reset()

# create services
login_service = LoginService()
register_service = RegisterService()
manager_service = ManagerService()
drop_down_service = DropDownService()
user_service = UserService()
customer_service = CustomerService()
admin_service = AdminService()

#-----------Main------------
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    return render_template('index.html')

#------------LOGIN------------
@app.route('/userLogin', methods=['POST'])
def userLogin():
    data = request.get_json()
    user = data['user']
    try:
        success = login_service.login(user)
        if success:
            del user['password']
            access_token = create_access_token(identity=data)
            user['jwt'] = access_token
            return json_response({'ok': True, 'data': user})
    except Exception as e:
        print("Exception", e)
    return json_response({'message': 'Bad request parameters'}, 400)

#-------REGISTRATIONS--------
#user
@app.route('/userRegister', methods=['POST'])
def userRegister():
    data = request.get_json()
    user = data['user']
    response = json_response({'message': 'Bad request parameters'}, 400)
    try:
        success = register_service.registerUser(user)
        print(success)
        if success:
            response = json_response({'ok': True, 'data': user})
        else:
            response = json_response({'message': 'username taken'}, 401)
    except:
        response = json_response({'message': 'Bad request parameters'}, 400)
        print("Failed to insert record")
    return response

#customer
@app.route('/customerRegister', methods=['POST'])
def customerRegister():
    data = request.get_json()
    user = data
    try:
        response = register_service.registerCustomer(user)
        return json_response(response)
    except pymysql.InternalError as e:
        print(e)
        print("Failed to insert record")
        return json_response({'message': 'Bad request parameters'}, 400)

#manager
@app.route('/managerRegister', methods=['POST'])
def managerRegister():
    data = request.get_json()
    user = data
    try:
        response = register_service.registerManager(user)
        return json_response(response)
    except pymysql.InternalError as e:
        print(e)
        print("Failed to insert record")
        return json_response({'message': 'Bad request parameters'}, 400)

#managerCustomer
@app.route('/managerCustomerRegister', methods=['POST'])
def managerCustomerRegister():
    data = request.get_json()
    user = data
    try:
        response = register_service.registerManagerCustomer(user)
        return json_response(response)
    except pymysql.InternalError as e:
        print(e)
        print("Failed to insert record")
        return json_response({'message': 'Bad request parameters'}, 400)

#-------DropDownService---------
@app.route('/getCompanies', methods=['GET'])
def getCompanies():
    response = drop_down_service.CompanyDropDown()
    return json_response(response)

@app.route('/movies', methods=['GET'])
def getMovies():
    response = drop_down_service.MovieDropDown()
    return json_response(response)

@app.route('/theaters/<string:comName>', methods=['GET'])
@login_required
def getTheaters(comName):
    theaters = drop_down_service.TheaterDropDown()
    return json_response({'ok': True, 'theaters': theaters})

@app.route('/creditcard', methods=['GET'])
@login_required
def getCreditCardNumbers():
    username = g.user['username']
    response = drop_down_service.getCreditCardNumbers(username)
    return json_response(response)

@app.route('/managers', methods=['GET'])
@admin_only
def get_managers():
    response = drop_down_service.ManagerDropDown()
    return json_response(response)

#----------UserService--------------------
@app.route('/exploreTheater', methods=['POST'])
@login_required
def explore_theater():
    data = request.get_json()
    print(data)
    query_data = user_service.ExploreTheater(data)
    return json_response({'ok': True, 'theaters': query_data})

@app.route('/logVisit', methods=['POST'])
@login_required
def log_visit():
    data = request.get_json()
    user = g.user['username']
    user_service.LogVisit(user, data)
    return json_response({'ok': True})

#--------CustomerService-------------------
@app.route('/exploreMovie', methods=['POST'])
@login_required
def explore_movie():
    data = request.get_json()
    query_data = customer_service.ExploreMovie(data)
    return json_response({'ok': True, 'moviePlays': query_data})

@app.route('/viewMovie', methods=['POST'])
@login_required
def view_movie():
    data = request.get_json()
    username = g.user['username']
    resp = customer_service.ViewMovie(username, data)
    if resp is not None:
        return json_response({'ok': True, 'data': resp})
    return json_response({'ok': True})

#----------ManagerService-----------------
@app.route('/theaterOverview', methods=['POST'])
@login_required
def get_theater_overview():
    data = request.get_json()
    user = g.user['username']
    response = manager_service.TheaterOverview(user, data)
    return json_response({'ok': True, "data": response})

@app.route('/GetVisitHistory', methods=['POST'])
@login_required
def get_visit_history():
    data = request.get_json()
    user = g.user['username']
    data = user_service.VisitHistory(user, data)
    return json_response({'data': data})

@app.route('/moviePlay', methods=['POST'])
@login_required
def ScheduleMovie():
    data = request.get_json()
    user = g.user['username']
    manager_service.ScheduleMovie(user, data)
    return json_response({'ok': True})

#------------Admin Service-------------
@app.route('/manageCompany', methods=['POST'])
@login_required
@admin_only
def manage_company():
    data = request.get_json()
    response = admin_service.ManageCompany(data)
    return json_response(response)

@app.route('/filterUser', methods=['POST'])
@login_required
@admin_only
def filter_user():
    data = request.get_json()
    response = admin_service.FilterUser(data)
    return json_response({"data": response})

@app.route('/approveUser', methods=['POST'])
@login_required
@admin_only
def approve_user():
    data = request.get_json()
    admin_service.ApproveUser(data)
    return json_response({"ok": True})

@app.route('/declineUser', methods=['POST'])
@login_required
@admin_only
def decline_user():
    data = request.get_json()
    admin_service.DeclineUser(data)
    return json_response({"ok": True})

@app.route('/theater', methods=['POST'])
@login_required
@admin_only
def create_theater():
    data = request.get_json()
    admin_service.CreateTheater(data)
    return json_response({"ok": True})

@app.route('/companyDetail/<string:name>', methods=['GET'])
@login_required
@admin_only
def company_detail(name):
    response = admin_service.CompanyDetail(name)
    return json_response(response)

@app.route("/example/<int:param_1>", methods=['GET'])
@login_required
def example_endpoint(param_1):
    print(param_1)
    user = g.user
    # response = json_response({'userType': 'user'}, 200)
    # userType = login_service.findUserType(user['username'])
    # response = json_response({'userType': userType}, 200)
    return json_response({'ok': True})

@app.route("/user", methods=['GET'])
@login_required
def get_user_type():
    user = g.user
    userType = login_service.findUserType(user['username'])
    response = json_response({'userType': userType}, 200)
    return response

@app.route('/createMovie', methods=['POST'])
@login_required
@admin_only
def create_movie():
    data = request.get_json()
    resp = admin_service.CreateMovie(data)
    return json_response({"data": resp})

#----------CustomerService--------------------
@app.route('/viewHistory', methods=['POST'])
@login_required
def viewHistory():
    user = g.user['username']
    print(user)
    data = customer_service.ViewHistory(user)
    return json_response({'data': data})

def json_response(payload, status_code=200):
    return json.dumps(payload), status_code, {'Content-type': 'application/json'}
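# --- Added usage sketch (not part of the original file): exercising the login
# route with Flask's built-in test client. The credentials are made-up
# placeholders.
#
# with app.test_client() as client:
#     rv = client.post('/userLogin',
#                      json={'user': {'username': 'demo', 'password': 'demo'}})
#     print(rv.status_code, rv.get_data(as_text=True))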
UTF-8
Python
false
false
9,235
py
61
endpoints.py
51
0.685328
0.680996
0
364
24.370879
80
ogrisel/probability
2,164,663,550,020
6ab68426a4274241e0bbe0ea6f151d41279ea6cd
3be42b83a15d022f5863c96ec26e21bac0f7c27e
/tensorflow_probability/python/internal/dtype_util.py
7c855239b7722135d0970333e0ddec8e30f23407
[ "Apache-2.0" ]
permissive
https://github.com/ogrisel/probability
846f5c13cddee5cf167b215e651b7479003f15d2
8f67456798615f9bf60ced2ce6db5d3dba3515fe
refs/heads/master
2022-11-09T10:53:23.000918
2020-07-01T23:16:03
2020-07-01T23:17:25
276,580,359
2
1
Apache-2.0
true
2020-07-02T07:37:58
2020-07-02T07:37:57
2020-07-01T23:17:33
2020-07-02T07:32:26
84,976
0
0
0
null
false
false
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for dtypes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import numpy as np

import tensorflow.compat.v2 as tf

__all__ = [
    'as_numpy_dtype',
    'assert_same_float_dtype',
    'base_dtype',
    'base_equal',
    'common_dtype',
    'is_bool',
    'is_complex',
    'is_floating',
    'is_integer',
    'max',
    'min',
    'name',
    'real_dtype',
    'size',
]


SKIP_DTYPE_CHECKS = False


def as_numpy_dtype(dtype):
  """Returns a `np.dtype` based on this `dtype`."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'as_numpy_dtype'):
    return dtype.as_numpy_dtype
  return dtype


def base_dtype(dtype):
  """Returns a non-reference `dtype` based on this `dtype`."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'base_dtype'):
    return dtype.base_dtype
  return dtype


def base_equal(a, b):
  """Returns `True` if base dtypes are identical."""
  return base_dtype(a) == base_dtype(b)


def common_dtype(args_list, dtype_hint=None):
  """Returns explict dtype from `args_list` if there is one."""
  dtype = None
  seen = []
  for a in tf.nest.flatten(args_list):
    if hasattr(a, 'dtype') and a.dtype:
      dt = as_numpy_dtype(a.dtype)
      seen.append(dt)
    else:
      seen.append(None)
      continue
    if dtype is None:
      dtype = dt
    elif dtype != dt:
      if SKIP_DTYPE_CHECKS:
        dtype = (np.ones([2], dtype) + np.ones([2], dt)).dtype
      else:
        raise TypeError(
            'Found incompatible dtypes, {} and {}. Seen so far: {}'.format(
                dtype, dt, seen))
  return dtype_hint if dtype is None else base_dtype(dtype)


def convert_to_dtype(tensor_or_dtype, dtype=None, dtype_hint=None):
  """Get a dtype from a list/tensor/dtype using convert_to_tensor semantics."""
  if tensor_or_dtype is None:
    return dtype or dtype_hint

  # Tensorflow dtypes need to be typechecked
  if tf.is_tensor(tensor_or_dtype):
    dt = base_dtype(tensor_or_dtype.dtype)
  elif isinstance(tensor_or_dtype, tf.DType):
    dt = base_dtype(tensor_or_dtype)
  # Numpy dtypes defer to dtype/dtype_hint
  elif isinstance(tensor_or_dtype, np.ndarray):
    dt = base_dtype(dtype or dtype_hint or tensor_or_dtype.dtype)
  elif np.issctype(tensor_or_dtype):
    dt = base_dtype(dtype or dtype_hint or tensor_or_dtype)
  else:
    # If this is a Python object, call `convert_to_tensor` and grab the dtype.
    # Note that this will add ops in graph-mode; we may want to consider
    # other ways to handle this case.
    dt = tf.convert_to_tensor(tensor_or_dtype, dtype, dtype_hint).dtype

  if not SKIP_DTYPE_CHECKS and dtype and not base_equal(dtype, dt):
    raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))
  return dt


def is_bool(dtype):
  """Returns whether this is a boolean data type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'is_bool'):
    return dtype.is_bool
  # We use `kind` because:
  # np.issubdtype(np.uint8, np.bool) == True.
  return np.dtype(dtype).kind == 'b'


def is_complex(dtype):
  """Returns whether this is a complex floating point type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'is_complex'):
    return dtype.is_complex
  return np.issubdtype(np.dtype(dtype), np.complexfloating)


def is_floating(dtype):
  """Returns whether this is a (non-quantized, real) floating point type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'is_floating'):
    return dtype.is_floating
  return np.issubdtype(np.dtype(dtype), np.floating)


def is_integer(dtype):
  """Returns whether this is a (non-quantized) integer type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'is_integer') and not callable(dtype.is_integer):
    return dtype.is_integer
  return np.issubdtype(np.dtype(dtype), np.integer)


def max(dtype):  # pylint: disable=redefined-builtin
  """Returns the maximum representable value in this data type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'max') and not callable(dtype.max):
    return dtype.max
  use_finfo = is_floating(dtype) or is_complex(dtype)
  return np.finfo(dtype).max if use_finfo else np.iinfo(dtype).max


def min(dtype):  # pylint: disable=redefined-builtin
  """Returns the minimum representable value in this data type."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'min') and not callable(dtype.min):
    return dtype.min
  use_finfo = is_floating(dtype) or is_complex(dtype)
  return np.finfo(dtype).min if use_finfo else np.iinfo(dtype).min


def name(dtype):
  """Returns the string name for this `dtype`."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'name'):
    return dtype.name
  if hasattr(dtype, '__name__'):
    return dtype.__name__
  return str(dtype)


def size(dtype):
  """Returns the number of bytes to represent this `dtype`."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'size') and hasattr(dtype, 'as_numpy_dtype'):
    return dtype.size
  return np.dtype(dtype).itemsize


def real_dtype(dtype):
  """Returns the dtype of the real part."""
  dtype = tf.as_dtype(dtype)
  if hasattr(dtype, 'real_dtype'):
    return dtype.real_dtype
  # TODO(jvdillon): Find a better way.
  return np.array(0, as_numpy_dtype(dtype)).real.dtype


def _assert_same_base_type(items, expected_type=None):
  r"""Asserts all items are of the same base type.

  Args:
    items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
        `Operation`, or `IndexedSlices`). Can include `None` elements, which
        will be ignored.
    expected_type: Expected type. If not specified, assert all items are
        of the same base type.

  Returns:
    Validated type, or none if neither expected_type nor items provided.

  Raises:
    ValueError: If any types do not match.
  """
  original_expected_type = expected_type
  mismatch = False
  for item in items:
    if item is not None:
      item_type = base_dtype(item.dtype)
      if expected_type is None:
        expected_type = item_type
      elif expected_type != item_type:
        mismatch = True
        break
  if mismatch:
    # Loop back through and build up an informative error message (this is very
    # slow, so we don't do it unless we found an error above).
    expected_type = original_expected_type
    original_item_str = None
    get_name = lambda x: x.name if hasattr(x, 'name') else str(x)
    for item in items:
      if item is not None:
        item_type = base_dtype(item.dtype)
        if not expected_type:
          expected_type = item_type
          original_item_str = get_name(item)
        elif expected_type != item_type:
          raise ValueError(
              '{}, type={}, must be of the same type ({}){}.'.format(
                  get_name(item),
                  item_type,
                  expected_type,
                  ((' as {}'.format(original_item_str))
                   if original_item_str else '')))
    return expected_type  # Should be unreachable
  else:
    return expected_type


def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return float type based on `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must be of the
  same float type. This function validates that all `tensors` are the same
  type, validates that type is `dtype` (if supplied), and returns the type.
  Type must be a floating point type. If neither `tensors` nor `dtype` is
  supplied, the function will return `dtypes.float32`.

  Args:
    tensors: Tensors of input values. Can include `None` elements, which will
        be ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
        float, or the common type of the inputs is not a floating point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if not dtype:
    dtype = tf.float32
  elif not is_floating(dtype):
    raise ValueError('Expected floating point type, got {}.'.format(dtype))
  return dtype
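# --- Added usage sketch (not part of the TFP source): common_dtype returns
# the one explicit dtype it finds across the inputs, or the hint when nothing
# carries a dtype.
#
# import numpy as np
# import tensorflow.compat.v2 as tf
# common_dtype([tf.constant(1.0), np.float32(2.0)])   # ==> float32
# common_dtype([1.0, 2], dtype_hint=tf.float64)       # ==> tf.float64 (hint wins)
# assert_same_float_dtype([tf.constant(1.0)])         # ==> tf.float32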
UTF-8
Python
false
false
8,725
py
149
dtype_util.py
138
0.662235
0.660287
0
275
30.727273
80
Raul-Pinheiro/projetoPessoal-controleDeEstoqueLojaVirtual
7,327,214,229,338
24b2a8cd753446b0ace893d4d053057301eaf720
5b5450b16fbde30b6677386cc5b6b54d24cb7bd2
/apps/controlEstoque/migrations/0009_auto_20200908_1824.py
5cd4b14b95b6a1f6794d68ad3cc6a646646d42ea
[]
no_license
https://github.com/Raul-Pinheiro/projetoPessoal-controleDeEstoqueLojaVirtual
c82e74fc6f1cbf08a26560171cfc31cd9f8f80cf
54afb55114e482aaa4e1b4ccbc0c1baddcd448a2
refs/heads/master
2023-01-22T16:39:53.818655
2020-12-08T16:20:58
2020-12-08T16:20:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 3.1.1 on 2020-09-08 21:24

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('controlEstoque', '0008_auto_20200908_1823'),
    ]

    operations = [
        migrations.AlterField(
            model_name='produtos',
            name='data_ultima_compra',
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 8, 18, 24, 8, 670532)),
        ),
        migrations.AlterField(
            model_name='produtos_loja',
            name='foto_produto',
            field=models.ImageField(blank=True, upload_to='fotos/%d/%m/%Y/loja/'),
        ),
    ]
UTF-8
Python
false
false
668
py
70
0009_auto_20200908_1824.py
46
0.595808
0.523952
0
24
26.833333
97
GianfrancoJara/Exp3Backend_JaraGonzalez_004D
19,138,374,288,920
c5cd41ec07ab78ccf8ecdd78ae47ec85f0d9bda8
033b5305f992c7e06df8563e1e69c600c94e90bd
/SoporteIT/SoporteIT/core/migrations/0002_solicitud_comentario.py
95e1f3026e569c2a28f1ed3cd95d8895680d48ae
[]
no_license
https://github.com/GianfrancoJara/Exp3Backend_JaraGonzalez_004D
ac95fa2ec4cfa108a8da647042a93573c38a9795
b0950e70487269a587c3a53f24aa3cbc2d766a72
refs/heads/main
2023-06-07T11:50:07.967530
2021-06-22T02:04:14
2021-06-22T02:04:14
378,295,207
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 3.2.3 on 2021-06-20 00:29

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='solicitud',
            name='comentario',
            field=models.CharField(max_length=512, null=True, verbose_name='Comentario'),
        ),
    ]
UTF-8
Python
false
false
416
py
12
0002_solicitud_comentario.py
7
0.598558
0.545673
0
18
22.111111
89
christofsteel/mtgman
3,298,534,912,378
09a81e6923094022cd5fad1e4ac1afbeef4c1003
07ea87b4d3ec31f39452cc95c166b48c2c3abc1a
/mtgman/imports/faces.py
f80c78dfc7a1162f35ee80f400aece2187dc9ab3
[]
no_license
https://github.com/christofsteel/mtgman
ba5793cebc93cc0eba8d391b57f3b31c98067e6e
acaa2a83f5964845c0273778824c223486f233f0
refs/heads/master
2020-09-13T03:31:01.581869
2019-12-16T05:40:55
2019-12-16T05:40:55
222,644,442
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from . import create_dict
from .card import get_card_from_sf
from ..model import CardFacePrinting, CardFaceBase, CardFace


def get_db_card_face(name, session):
    return session.query(CardFace).filter(CardFace.name == name).first()


def get_db_card_face_from_sf(face, session):
    return get_db_card_face(face["name"], session)
    #faces = []
    #if "card_faces" in e:
    #    for face in e["card_faces"]:
    #        faces.append(get_db_card_face(face["name"], session))
    #return faces


def get_card_face_from_sf(face, e, session):
    card_face = get_db_card_face_from_sf(face, session)
    if card_face is not None:
        return card_face
    card_face = add_card_face(face, e, session)
    session.commit()
    return card_face


def add_card_face(face, e, session):
    card = get_card_from_sf(e, session)
    card_face = create_card_face(face, card)
    session.add(card_face)
    return card_face


def create_card_face(face, card):
    fields = ["name", "mana_cost", "type_line", "oracle_text"]
    fields_int = ["power", "toughness"]
    lists = ["colors", "color_indicator"]
    fields_uuid = []
    renames = {"power": "power_str", "toughness": "toughness_str"}
    if type(card) is int:
        custom = {"card_id": card}
    else:
        custom = {"card": card}
    dict_all = create_dict(face, fields=fields, lists=lists, fields_int=fields_int,
                           fields_uuid=fields_uuid, renames=renames, custom=custom)
    return CardFace(**dict_all)


def create_card_face_printing(face, card_face_base, printing):
    fields = ["printed_name", "printed_text", "printed_type_line"]
    objects = {
        "image_uris": {
            "png": ("image_uri_png", lambda x: x),
            "border_crop": ("image_uri_border_crop", lambda x: x),
            "art_crop": ("image_uri_art_crop", lambda x: x),
            "large": ("image_uri_large", lambda x: x),
            "normal": ("image_uri_normal", lambda x: x),
            "small": ("image_uri_small", lambda x: x)
        }
    }
    custom = {}
    if type(card_face_base) is int:
        custom["card_face_base_id"] = card_face_base
    else:
        custom["card_face_base"] = card_face_base
    if type(printing) is int:
        custom["card_printing_id"] = printing
    else:
        custom["card_printing"] = printing
    dict_all = create_dict(face, fields=fields, objects=objects, custom=custom)
    return CardFacePrinting(**dict_all)


def makeCardFacePrinting(e, card_base_face, printing, session):
    card_face_printing = session.query(CardFacePrinting)\
        .filter(CardFacePrinting.card_face_base == card_base_face)\
        .filter(CardFacePrinting.card_printing == printing).first()
    if card_face_printing is None:
        card_face_printing = create_card_face_printing(e, card_base_face, printing)
        session.add(card_face_printing)
    return card_face_printing


def create_card_face_base(face, card_face, basecard):
    fields = ["artist", "flavor_text", "watermark"]
    fields_uuid = ["artist_id", "illustration_id"]
    custom = {}
    if type(card_face) is int:
        custom["card_face_id"] = card_face
    else:
        custom["card_face"] = card_face
    if type(basecard) is int:
        custom["basecard_id"] = basecard
    else:
        custom["basecard"] = basecard
    dict_all = create_dict(face, fields=fields, fields_uuid=fields_uuid, custom=custom)
    return CardFaceBase(**dict_all)


def makeCardBaseFace(face, card_face, basecard, session):
    card_base_face = session.query(CardFaceBase)\
        .filter(CardFaceBase.card_face == card_face)\
        .filter(CardFaceBase.basecard == basecard).first()
    if card_base_face is None:
        card_base_face = create_card_face_base(face, card_face, basecard)
        session.add(card_base_face)
    return card_base_face
UTF-8
Python
false
false
3,794
py
19
faces.py
17
0.633105
0.633105
0
107
34.383178
140
Aswin-Sureshumar/Python-Programs
14,001,593,400,178
0103eaec5aed67e745956b0de8df3f3f3b654ff6
b42c827e57b6c24251dedf0894ba3a97eb876b7c
/list gen.py
f7e04b92baf5fb49a18e00903c340c02df6f359e
[]
no_license
https://github.com/Aswin-Sureshumar/Python-Programs
77f20bacefc32307b60a00e9345cae95dc14185f
0387eb732e1b43995d161b5088b49b1155405411
refs/heads/master
2022-12-22T11:53:04.864017
2020-09-22T10:23:57
2020-09-22T10:23:57
283,938,006
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
n = int(input(" enter the elements req "))
items = []  # avoid shadowing the built-in name `list`
for i in range(0, n):
    a = int(input())
    items.append(a)
print(items)
UTF-8
Python
false
false
124
py
76
list gen.py
76
0.596774
0.58871
0
6
19
40
jjsjann123/cs526_proj2_enhance
13,554,916,820,038
31854d3d854c4e2e93d021763022963544d50b4a
5b4a2fb592f39e07bf5de619071d3e75b7aa3cb0
/multiples.py
61a7bc8d9ab8412158f6ee979b768c6d649466b4
[]
no_license
https://github.com/jjsjann123/cs526_proj2_enhance
70dcb6c03410b50bfc059f2ec5dbacd81fd535d7
7c661664937d4c056b14bbd62bafe3a0a484f684
refs/heads/master
2020-03-28T18:57:39.803999
2013-12-09T02:00:25
2013-12-09T02:00:25
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from omega import *
from cyclops import *
from math import *
from euclid import *
from fun import *


class multiples(object):
    global stellarColorMap

    orbitScale = Uniform.create('orbitScale', UniformType.Float, 1)
    radiusScale = Uniform.create('radiusScale', UniformType.Float, 1)
    orbitRatio = Uniform.create('orbit_ratio', UniformType.Float, 1)
    radiusRatio = Uniform.create('radius_ratio', UniformType.Float, 1)
    #glowPower = Uniform.create('unif_Glow', UniformType.Float, 1)
    #starColor = Uniform.create('star_color', UniformType.Color, 1)
    cutOffX = Uniform.create('cutoff_x', UniformType.Float, 1)
    cutOffY = Uniform.create('cutoff_y', UniformType.Float, 1)
    offPanelSize = Uniform.create('off_size', UniformType.Float, 1)

    multipleScale = 0.05
    height = 5.0
    width = 40.0
    offsize = 0.2
    orbitRatioFloat = 10.0
    radiusRatioFloat = 4.0
    ratioRadius = 20.0
    fontSize = 0.7

    @staticmethod
    def getData(str, type, default):
        if str == None:
            return default
        else:
            return type(str)

    @classmethod
    def initialize(cls):
        multipleScale = cls.multipleScale
        width = cls.width * multipleScale
        height = cls.height * multipleScale
        cls.orbitScale.setFloat(1.0)
        cls.orbitRatio.setFloat(cls.orbitRatioFloat)
        cls.radiusRatio.setFloat(cls.radiusRatioFloat)
        cls.radiusScale.setFloat(1.0)
        #cls.glowPower.setFloat(20)
        #cls.starColor.setColor(Color(1, 0, 0, 1))
        cls.cutOffX.setFloat(width - cls.offsize*multipleScale)
        cls.cutOffY.setFloat(height - cls.offsize*multipleScale)
        cls.offPanelSize.setFloat(cls.offsize * cls.multipleScale)

        geom = ModelGeometry.create('stellar')
        v1 = geom.addVertex(Vector3(0, height/2, -0.01))
        geom.addColor(Color(0,1,0,0))
        v2 = geom.addVertex(Vector3(0, -height/2, -0.01))
        geom.addColor(Color(0,0,0,0))
        v3 = geom.addVertex(Vector3(width, height/2, -0.01))
        geom.addColor(Color(1,1,0,0))
        v4 = geom.addVertex(Vector3(width, -height/2, -0.01))
        geom.addColor(Color(1,0,0,0))
        geom.addPrimitive(PrimitiveType.TriangleStrip, 0, 4)
        getSceneManager().addModel(geom)

        shaderPath = "./shaders/"
        multipleDraw = ProgramAsset()
        multipleDraw.name = "background"
        multipleDraw.vertexShaderName = shaderPath + "background.vert"
        multipleDraw.fragmentShaderName = shaderPath + "background.frag"
        getSceneManager().addProgram(multipleDraw)

        starDraw = ProgramAsset()
        starDraw.name = "planet"
        starDraw.vertexShaderName = shaderPath + "planet.vert"
        starDraw.fragmentShaderName = shaderPath + "planet.frag"
        starDraw.geometryOutVertices = 4
        starDraw.geometryShaderName = shaderPath + "/planet.geom"
        starDraw.geometryInput = PrimitiveType.Points
        starDraw.geometryOutput = PrimitiveType.TriangleStrip
        getSceneManager().addProgram(starDraw)

    def setHighlight(self, bool):
        if bool:
            self.highlight.setInt(2)
        else:
            self.highlight.setInt(0)

    def __init__(self, system):
        multiple = StaticObject.create('stellar')
        multiple.setEffect("background -t")
        self.multiple = multiple
        self.multiple.setSelectable(True)
        self.starRadius = system['star'][0]['radius']

        # This is supposed to be set to the parentNode for it to attach to.
        self.parentNode = SceneNode.create('stellar_' + system['stellar']['name'])
        self.parentNode.addChild(multiple)
        multiple.getMaterial().addUniform('unif_Glow', UniformType.Float).setFloat(1/self.starRadius*self.ratioRadius)

        stellar = system['stellar']
        distance = self.getData(stellar['distance'], float, 100.0)
        name = self.getData(stellar['name'], str, 'anonym')
        spectraltype = self.getData(system['star'][0]['spectraltype'], str, 'G')
        (min, max) = habitRange[spectraltype]

        material = multiple.getMaterial()
        self.highlight = Uniform.create('highlight', UniformType.Int, 1)
        self.highlight.setInt(0)
        material.addUniform('star_color', UniformType.Color).setColor(stellarColorMap[spectraltype])
        material.attachUniform(self.orbitScale)
        material.attachUniform(self.cutOffX)
        material.attachUniform(self.orbitRatio)
        material.attachUniform(self.highlight)
        material.addUniform('hab_min', UniformType.Float).setFloat(min*self.multipleScale)
        material.addUniform('hab_max', UniformType.Float).setFloat(max*self.multipleScale)

        multipleScale = self.multipleScale
        width = self.width * multipleScale
        height = self.height * multipleScale

        #info = 'Stellar System: ' + name + ' Distance: ' + str(round(distance,1))
        info = name + ' distance from earth ' + str(round(distance,1))
        t = Text3D.create('fonts/arial.ttf', self.fontSize * self.multipleScale, info)
        t.setFixedSize(False)
        t.setFontResolution(120)
        t.setPosition(Vector3(-0.5, height/2, 0))
        self.parentNode.addChild(t)

        planets = system['planets']
        numOfPlanets = len(planets)

        geom = ModelGeometry.create(name)
        index = 0
        for planet in planets:
            geom.addVertex(Vector3(self.multipleScale * self.getData(planet['semimajoraxis'], float, 1), 0, 0.01))
            geom.addColor(Color(discoveryMethod[planet['discoverymethod']], numOfPlanets, index,
                                self.multipleScale * self.getData(planet['radius'], float, 0.1)))
            # pName = planet['name']
            # print pName
            index += 1
            # if name in textureMap:
            #     obj.setEffect("textured -d ./model/" + name + ".jpg")
            # else:
            #     obj.setEffect("textured -d " + randomTextureMap[hash_string(name,len(randomTextureMap))] )
            # multiple.getMaterial().setDiffuseTexture(

        geom.addVertex(Vector3(width, 0., 0.01))
        geom.addColor(Color(10.0, 0.0, 0.0, 0.0))
        geom.addPrimitive(PrimitiveType.Points, 0, numOfPlanets+1)
        getSceneManager().addModel(geom)

        planetSystem = StaticObject.create(name)
        planetSystem.setEffect("planet -t")
        material = planetSystem.getMaterial()
        material.attachUniform(self.orbitScale)
        material.attachUniform(self.radiusScale)
        material.attachUniform(self.cutOffX)
        material.attachUniform(self.cutOffY)
        material.attachUniform(self.offPanelSize)
        material.attachUniform(self.orbitRatio)
        material.attachUniform(self.radiusRatio)
        multiple.addChild(planetSystem)
UTF-8
Python
false
false
6,007
py
34
multiples.py
17
0.736141
0.713334
0
157
37.267516
153
rogerh2/CryptoNeuralNet
1,271,310,336,432
a683168698beccf95893c48afeedde472c167fc8
a632f8e1faf3ae92608420f697673fc6485d2ef7
/CryptoBot/CryptoBotUnitTests/CryptoFillsModelUnitTests.py
497051f71b8d12180f1557eba650a9996ad6b41e
[]
no_license
https://github.com/rogerh2/CryptoNeuralNet
b70a7467d939db8836a04cc516747b780ab1b8eb
e2a14280b06a92d6822c50f53175ae33aad1e8a1
refs/heads/master
2022-12-09T08:55:54.592354
2020-06-08T02:37:29
2020-06-08T02:37:29
130,617,552
2
0
null
false
2022-12-08T01:09:08
2018-04-22T23:52:13
2020-06-08T02:37:55
2022-12-08T01:09:07
12,759
2
0
26
Python
false
false
import unittest

import pandas as pd
import numpy as np

import CryptoBot.CryptoForecast as cf


class CryptoFillsModelTestCase(unittest.TestCase):
    data_obj = cf.FormattedCoinbaseProData(historical_order_books_path=None, historical_fills_path=None)

    def test_does_create_formatted_input_data_with_one_order_book_and_no_fills(self):
        # This is needed for live runs and backtests
        historical_order_books_path = '/Users/rjh2nd/PycharmProjects/CryptoNeuralNet/CryptoBot/CryptoBotUnitTests/' \
                                      'UnitTestData/SYM_historical_order_books_20entries.csv'
        historical_order_books = pd.read_csv(historical_order_books_path)
        order_books = historical_order_books.iloc[-1::]
        self.data_obj.historical_order_books = order_books.reset_index(drop=True)
        data_dict = self.data_obj.format_data('forecast')
        data = data_dict['input']

        self.assertFalse(np.isnan(data).any())
        self.assertEqual(len(data[0, ::]), 120)


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
1,053
py
49
CryptoFillsModelUnitTests.py
46
0.687559
0.679962
0
26
39.5
117
charulagrl/DataStructures-Algorithms
18,769,007,105,711
bb58c61a17bc572a519c2302c4096eb06f0a0a37
83a6fd80c8dd85824e7fbae07933be9a785ee18d
/CrackingTheCodingInterview/Arrays-and-Strings/check_if_string_contains_unique_characters.py
852b993e132edd9e8037b4d589ab06dc40883ff8
[]
no_license
https://github.com/charulagrl/DataStructures-Algorithms
f81b667eefedd24231342c1f11faeee9e94fdc41
ab97ba1e09488420042946e9655111fa438c94d9
refs/heads/master
2016-09-21T14:40:13.248958
2016-09-01T10:42:24
2016-09-01T10:42:24
36,951,364
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: UTF-8 -*-
'''
Problem: Implement an algorithm to determine if a string has all unique characters.

Naive Approach: Loop through each character in the string and check the entire string
        for another occurrence of that character. If even one such character is found,
        return False.
        Time complexity: O(n^2) where n is the length of the string.

Approach 2: Sort the string first, then loop over it comparing each character with its
        successor; if any two neighbours match, return False.
        Time complexity: sorting costs O(n log n) and the scan costs O(n), so the
        overall complexity comes down to O(n log n).

Approach 3: Store each character in a hash map keyed by the character; if a character
        is already present in the hash map, return False.
        Time complexity: O(n) to loop over the string.
        Space complexity: O(n) to store n characters.
'''

# Check if string has unique characters using a hash map
def unique_character_string(s):
    # Declaring hash_map using a python dictionary
    hash_map = {}

    for i in s:
        if i in hash_map:
            return False
        else:
            hash_map[i] = True

    return True
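# --- Added sketch (not in the original file): the first two approaches from
# the docstring above, for comparison. The function names are illustrative only.

def unique_character_string_naive(s):
    # O(n^2): compare every character against every later one
    for i in range(len(s)):
        for j in range(i + 1, len(s)):
            if s[i] == s[j]:
                return False
    return True

def unique_character_string_sorted(s):
    # O(n log n): after sorting, any duplicate must sit next to its twin
    t = sorted(s)
    for k in range(len(t) - 1):
        if t[k] == t[k + 1]:
            return False
    return True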
UTF-8
Python
false
false
1,141
py
9
check_if_string_contains_unique_characters.py
8
0.718668
0.715162
0
39
28.25641
122
keerthanakumar/contest
15,573,551,434,214
c98879c04bae54ce5d87e3d9db912bf2a6904521
f25bd4cd35b31289e06159034065d16faf094eaa
/contest/teams/2PacV5_1/factory.py
eddc6816f135b28e67ea34dd8c3b6724edac22c6
[]
no_license
https://github.com/keerthanakumar/contest
28dc02609a54f40d36156545f3cf9df18acf6c11
3d29c4aa83232ff171051d7197c402323845c526
refs/heads/master
2021-01-01T18:17:08.329322
2014-04-29T18:47:55
2014-04-29T18:47:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from captureAgents import AgentFactory
import distanceCalculator
import random, time, util
from game import Directions
import game

#our code
from agent import *
from offenseRole import *
from defenseRole import *
from inference import *
import search
from myUtil import *


class MSBFactory(AgentFactory):
    def __init__(self, isRed, **kwArgs):
        AgentFactory.__init__(self, isRed)
        print "factory __init__ called"

        #for emergency timeout prevention
        self.totalRuntime = 0
        self.nRuntimeSamples = 0
        self.emergencyScale = 1

        self.args = kwArgs
        self.agents = {}

        #this lists which MSBAgentRoles we'll assign to new agents
        self.availableRoles = util.Queue()
        if not self.args["offenseOnly"]:
            self.availableRoles.push(MSBDefensiveAgentRole)
        self.availableRoles.push(MSBOffensiveAgentRole)

        self.initializeForNewGame()

    # this is (annoyingly) separate from __init__ because the factory outlasts a single game when the -n option is used
    def initializeForNewGame(self):
        print "initializeForNewGame called"
        self.distancer = None
        self.pathCache = {}
        self.walls = []
        self.legalPositions = []
        self.tracking = None
        self.gameTime = -1
        self.lastDeath = -1
        self.lastSwap = -1

    def getAgent(self, index):
        print "factory.getAgent called for agent %d" % index
        newAgent = MSBAgent(index, self)

        # assign this new agent a role
        if not self.args["doNothing"]:
            newAgentRole = self.availableRoles.pop()
            self.availableRoles.push(newAgentRole)
            newAgent.role = newAgentRole()

        self.agents[index] = newAgent
        return newAgent

    def removeDeadAgents(self, gameState):
        dead = []
        for agentIndex in self.agents:
            if agentIndex >= gameState.getNumAgents():
                dead.append(agentIndex)
        for agentIndex in dead:
            del self.agents[agentIndex]

    def initTracking(self, index, gameState):
        "Initializes inference modules for each enemy agent"
        #already inited
        if self.tracking is not None:
            return

        agent = self.agents[index]
        opponents = agent.getOpponents(gameState)
        self.tracking = {}
        for opp in opponents:
            tracker = ExactInference(MSBSimulatedEnemyAgent(opp, self.getDistancer(gameState), self.args["uniformEnemySimulation"]))
            tracker.failOnEmpty = self.args["failOnEmptyDistribution"]
            tracker.initialize(gameState)
            tracker.initializeSpecific(gameState.getInitialAgentPosition(opp))
            self.tracking[opp] = tracker

    def notifyEaten(self, gameState, enemyIndex):
        tracker = self.tracking[enemyIndex]
        tracker.initializeSpecific(gameState.getInitialAgentPosition(enemyIndex))

    def maybeSwapRoles(self, index):
        # if we don't have exactly 2 agents or we swapped recently, don't swap
        if len(self.agents) != 2 or self.gameTime - self.lastSwap < 10:
            return

        gameState = self.agents[index].getCurrentObservation()
        pos1, pos2 = [gameState.getAgentPosition(i) for i in self.agents]

        # if either agent is pacman, don't swap
        for agentIndex in self.agents:
            if gameState.getAgentState(agentIndex).isPacman:
                return

        # if the agents are far apart, don't swap
        if self.getDistancer(gameState).getDistance(pos1, pos2) > 7:
            return

        optFunc = max if self.agents[index].red else min
        closestXToBorder = optFunc(pos1, pos2, key=lambda p: p[0])[0]

        # if neither agent is close to the border, don't swap
        if abs(gameState.data.layout.width/2 - closestXToBorder) > 3:
            return

        # TODO: verify one of our agents died recently... not sure if necessary?

        for enemyIndex in self.tracking:
            # if there is an invader, don't swap
            if gameState.getAgentState(enemyIndex).isPacman:
                return

            # if there is a very imminent threat on the border, don't swap
            enemyPos = gameState.getAgentPosition(enemyIndex)
            if enemyPos != None and abs(gameState.data.layout.width/2 - enemyPos[0]) < 3:
                return

        # finally, if we haven't fallen out on any of the above conditions, swap roles
        self.lastSwap = self.gameTime
        roles = [agent.role for agent in self.agents.values()]
        for agent, newRole in zip(self.agents.values(), reversed(roles)):
            agent.role = newRole
        print "/\\"*100
        print "Swapping Roles!"

    def updateSharedKnowledge(self, index):
        # grab the current observation first, since initTracking needs it below
        gameState = self.agents[index].getCurrentObservation()

        #ideally, this shouldn't happen since we'll call initTracking from registerInitialState
        if self.tracking == None:
            self.initTracking(index, gameState)

        lastGameState = self.agents[index].getPreviousObservation()
        dists = gameState.getAgentDistances()

        #an annoyance
        self.removeDeadAgents(gameState)

        #update a timer to know how many moves have occurred before this one -- this includes enemy agents
        if self.gameTime == -1:
            startFirst = index == 0 #TODO this is incorrect, but it may not be possible to be truly correct.
            self.gameTime = 0 if startFirst else 1
        else:
            self.gameTime += 2

        print "Agent %d calling updateSharedKnowledge at time step %d" % (index, self.gameTime)

        #check if we died in the last step
        if lastGameState != None:
            lastPos = lastGameState.getAgentPosition(index)
            nowPos = gameState.getAgentPosition(index)
            if nowPos == gameState.getInitialAgentPosition(index) and self.getDistancer(gameState).getDistance(lastPos, nowPos) > 4:
                self.lastDeath = self.gameTime

        #check if the last enemy to move killed itself
        prevEnemy = index - 1
        prevEnemyEaten = False
        if prevEnemy < 0:
            prevEnemy += gameState.getNumAgents()
        prevAlly = prevEnemy - 1
        if prevAlly < 0:
            prevAlly += gameState.getNumAgents()
        if self.gameTime >= 2:
            prevAllyState = self.agents[prevAlly].getCurrentObservation()
            prevEnemyLoc = prevAllyState.getAgentPosition(prevEnemy)
            if prevEnemyLoc != None and (gameState.getAgentPosition(prevEnemy) == None or gameState.getAgentPosition(prevEnemy) == gameState.getInitialAgentPosition(prevEnemy)):
                for agentIndex in self.agents:
                    if self.getDistancer(gameState).getDistance(prevEnemyLoc, prevAllyState.getAgentPosition(agentIndex)) <= 1:
                        prevEnemyEaten = True
                        break

        # check if an enemy ate one of our food in the last time step -- if so, we know where it is.
        prevEnemyJustAteFood = False
        prevEnemyFoodEaten = None
        if self.gameTime >= 2 and self.args["foodInference"]:
            prevAllyState = self.agents[prevAlly].getCurrentObservation()
            prevFood = set(self.agents[index].getFoodYouAreDefending(prevAllyState).asList())
            foodNow = set(self.agents[index].getFoodYouAreDefending(gameState).asList())
            foodDiff = prevFood - foodNow
            if len(foodDiff) == 1:
                prevEnemyJustAteFood = True
                prevEnemyFoodEaten = foodDiff.pop()

        for enemyIndex, tracker in self.tracking.items():
            print "Agent %d observes enemy %d at noisyDistance %d (direct reading: %s) from its viewpoint %s" % (index, enemyIndex, dists[enemyIndex], gameState.getAgentPosition(enemyIndex), gameState.getAgentPosition(index))

            # if the enemy is close enough for us to know exactly where it is, just update the tracker with that
            if gameState.getAgentPosition(enemyIndex) != None:
                tracker.initializeSpecific(gameState.getAgentPosition(enemyIndex))
                print "- it's close enough we have an exact reading, so ignoring noisyDistance"
                continue

            # if our check outside the loop indicated the enemy ate, skip observe and elapseTime on it
            if enemyIndex == prevEnemy and prevEnemyJustAteFood:
                tracker.initializeSpecific(prevEnemyFoodEaten)
                print "- enemy just ate food, so we know it's at %s" % tuple([prevEnemyFoodEaten])
                continue

            # if this enemy was the last enemy to move and killed itself, update beliefs to initial position
            if enemyIndex == prevEnemy and prevEnemyEaten:
                tracker.initializeSpecific(gameState.getInitialAgentPosition(enemyIndex))
                print "- enemy killed itself, resetting to initial position"
                continue

            # elapse time once per round
            if enemyIndex == prevEnemy and self.gameTime != 0:
                tracker.elapseTime(gameState.deepCopy())

            #debug
            # realPos = gameState.true.getAgentPosition(enemyIndex)
            # realDistance = util.manhattanDistance(gameState.getAgentPosition(index), realPos)
            # print "!!! agent %d's view of enemy %d: noisyDistance=%d, realPos=%s, realDistance=%d (delta %d)" % (index, enemyIndex, dists[enemyIndex], realPos, realDistance, dists[enemyIndex]-realDistance)
            # import capture
            # assert dists[enemyIndex]-realDistance in capture.SONAR_NOISE_VALUES, "invalid noisyDistance!!!"

            # observe
            tracker.observe(dists[enemyIndex], gameState.getAgentPosition(index), self.getDistancer(gameState), gameState)

            # if the enemy isPacman, then we know it's on our side. If not, we know it's not.
            if self.args["pacmanInference"]:
                usRed = self.agents[index].red
                isPacman = gameState.getAgentState(enemyIndex).isPacman
                locRed = isPacman if usRed else not isPacman
                beliefs = tracker.beliefs
                for loc in beliefs:
                    if gameState.isRed(loc) != locRed:
                        beliefs[loc] = 0
                beliefs.normalize()

            # not sure if I should need to do this, but the belief distribution seems to eventually be empty
            if tracker.getBeliefDistribution().totalCount() == 0:
                tracker.initializeUniformly(gameState)
                #observe again so distribution isn't useless
                tracker.observe(dists[enemyIndex], gameState.getAgentPosition(index), self.getDistancer(gameState), gameState)
                print "- enemy %d's tracker being reset due to being empty." % enemyIndex

            print "- enemy %d now thought to occupy %s" % (enemyIndex, self.getAveragedEnemyLocation(enemyIndex))

        self.maybeSwapRoles(index)

    def updateDisplay(self, gameState, curIndex):
        dists = [self.tracking[i].getBeliefDistribution() if i in self.tracking else None for i in range(gameState.getNumAgents())]
        if self.args["showMiscDistributions"]:
            for i in range(len(dists)):
                if dists[i] == None and i in self.agents:
                    dists[i] = self.agents[i].miscDistribution
        self.agents[curIndex].displayDistributionsOverPositions(dists)

    def getBeliefDistribution(self, enemyIndex):
        return self.tracking[enemyIndex].getBeliefDistribution()

    def getAveragedEnemyLocation(self, enemyIndex):
        xavg = 0
        yavg = 0
        for pos, prob in self.tracking[enemyIndex].getBeliefDistribution().items():
            x, y = pos
            xavg += x * prob
            yavg += y * prob
        avgPoint = util.nearestPoint((xavg, yavg))

        # annoying thing because mazeDistance doesn't work if one point is a wall
        if avgPoint in self.walls:
            neighbors = list(getNeighbors(avgPoint))
            neighbors = [n for n in neighbors if n in self.legalPositions]
            if len(neighbors) > 0:
                avgPoint = neighbors[0]
            else:
                raise Exception("avg enemy location is wall surrounded by walls")

        return avgPoint

    def getDistancer(self, gameState = None):
        if self.distancer != None:
            return self.distancer

        # this should never happen, since registerInitialState calls this with a gameState
        if gameState == None:
            raise Exception("getDistancer called without gameState, but no distancer has been inited yet")

        distancer = distanceCalculator.Distancer(gameState.data.layout)
        distancer.getMazeDistances()
        self.distancer = distancer
        self.walls = gameState.getWalls().asList()
        self.legalPositions = gameState.getWalls().asList(False)
        return distancer

    def getPath(self, gameState, source, target):
        # basic caching of paths
        if (source, target) in self.pathCache:
            #print "Found path from %s to %s in pathCache" % (source, target)
            return self.pathCache[(source, target)]
        elif (target, source) in self.pathCache:
            #print "Found path from %s to %s in pathCache" % (source, target)
            return reversed(self.pathCache[(target, source)])

        print "getPath(%s, %s) called, computing using A*" % (source, target)

        # compute path using A* search with known optimal maze distance as heuristic
        problem = MSBPathfindingSearchProblem(source, target, self.legalPositions)
        def heuristic(state, prob):
            return self.getDistancer(gameState).getDistance(state, target)
        path = search.astar(problem, heuristic)
        assert len(path) == self.getDistancer(gameState).getDistance(source, target), "A* found non-optimal path from %s to %s" % (source, target)

        # update cache
        self.pathCache[(source, target)] = path
        for i in range(0, len(path)-1):
            self.pathCache[(path[i], target)] = path[i+1:]

        print "getPath(%s, %s) returning; len(pathCache)=%d" % (source, target, len(self.pathCache))
        return path

    def reportRuntime(self, elapsedTime):
        self.totalRuntime += elapsedTime
        self.nRuntimeSamples += 1
        avgRuntime = self.totalRuntime / self.nRuntimeSamples
        if avgRuntime > 0.7 and self.emergencyScale >= 0.4:
            self.emergencyScale -= 0.25
            if "original_maxFoodToPathfind" not in self.args:
                self.args["original_maxFoodToPathfind"] = self.args["maxFoodToPathfind"]
            self.args["maxFoodToPathfind"] = int(self.args["original_maxFoodToPathfind"] * self.emergencyScale)
            print "########################### Emergency timeout prevention: reducing maxFoodToPathfind to %d (last move took %.3f seconds; average is %.3f seconds)" % (self.args["maxFoodToPathfind"], elapsedTime, avgRuntime)
            self.totalRuntime = 0
            self.nRuntimeSamples = 0


# this is used to come up with a distribution of possible enemy agent successor positions for the elapseTime updates
# TODO: maybe in the future, use a copy of our own agent to predict this?
class MSBSimulatedEnemyAgent:
    def __init__(self, index, distancer, uniform = False):
        self.index = index
        self.distancer = distancer
        self.weights = { "default" : 1 }

        if uniform:
            self.agent = self
            return

        #currently, this uses BaselineAgents' agents.
        #To use the features/weights in this class, set self.agent = self
        try:
            from BaselineAgents.baselineAgents import OffensiveReflexAgent, DefensiveReflexAgent
            self.agent = OffensiveReflexAgent(index) if index%2==0 else DefensiveReflexAgent(index)
            self.agent.distancer = distancer
        except:
            #if BaselineAgents isn't accessible, fallback gracefully
            self.agent = self

    def getFeatures(self, state, action):
        return {"default":1}

    def evaluate(self, state, action):
        #fall through to another agent if we have one
        if self.agent is None:
            return 0
        elif self.agent != self:
            return max(0, self.agent.evaluate(state, action))

        features = self.getFeatures(state, action)
        amts = [features[i]*self.weights[i] if i in self.weights else 0 for i in features]
        return max(sum(amts), 0)

    def getDistribution(self, gameState):
        #get the utilities from the agent and find the maximum
        utilities = {action: self.evaluate(gameState, action) for action in gameState.getLegalActions(self.index)}
        maxUtility = max(utilities.values())

        #any action that maximizes utility gets equal probability, all else get 0
        tbr = util.Counter()
        for action in utilities:
            if utilities[action] < maxUtility:
                continue
            tbr[action] = 1
        tbr.normalize()
        return tbr


class MSBPathfindingSearchProblem(search.SearchProblem):
    def __init__(self, source, target, legalPositions):
        self.source = source
        self.target = target
        self.legalPositions = legalPositions

    def getStartState(self):
        return self.source

    def isGoalState(self, state):
        return state == self.target

    def getSuccessors(self, state):
        return [(c, c, 1) for c in getNeighbors(state, includeDiagonals=False) if c in self.legalPositions]

    def getCostOfActions(self, actions):
        return len(actions)
UTF-8
Python
false
false
15,338
py
18
factory.py
16
0.734842
0.730017
0
418
35.69378
216
jaljs/myworkspace
17,540,646,454,370
a8cd40d32024a85f52d1c79bca9444da8e774413
409d4ad370e25c23691052645ef1eb2b6deaf341
/car/webcontrol/main.py
c4034c237400056b912a0d97906d65672d0162d4
[]
no_license
https://github.com/jaljs/myworkspace
5204de1ebda0ba1657d76ae90690706f89c325bb
c27240a022cccbc0bb52a6dd6d3f61217111b6ed
refs/heads/master
2021-01-17T15:26:44.217667
2018-06-06T10:20:11
2018-06-06T10:20:11
83,683,768
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
from bottle import get, post, run, request, template


@get("/")
def index():
    return template("index")


@post("/cmd")
def cmd():
    print("按下了按钮: " + request.body.read().decode())
    return "OK"


@post("/mcmd")
def mcmd():
    print("pull-------------->" + request.body.read().decode())


# `run` takes `port`, not `post`
run(host="0.0.0.0", port=8080)
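# --- Added note (not in the original file): once the server is running, the
# handlers above can be exercised from a shell, e.g.
#   curl http://<host>:8080/
#   curl -d 'forward' http://<host>:8080/cmd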
UTF-8
Python
false
false
343
py
63
main.py
6
0.60961
0.582583
0
14
22.714286
58
zhuping580/myflaskdemo
10,505,490,024,520
ebc4be3c42b24c12527743b87c6dee3720f608e6
988eb29092e518638130e53632f2a0910cea0e79
/app/cases.py
2c04c30b9c9e887c32ccbe51c6e8f30340b4abd0
[]
no_license
https://github.com/zhuping580/myflaskdemo
19515b59e90cfcfceb2fae0c1911caa65748bf26
34fa07baae62783d0b8bfe4b0969b4615cb0f74b
refs/heads/master
2023-07-01T13:44:53.904576
2021-08-09T08:16:22
2021-08-09T08:16:22
388,406,288
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
from datetime import datetime

from flask import Blueprint, request, jsonify
import requests

from common.token_method import login_required, verify_token
from common import db
from common.modelFuntion import CreateCase

# Create the blueprint
cases = Blueprint('cases', __name__)


@cases.route('/cases/update', methods=['POST'])
@login_required
def update_cases():
    userid = verify_token()
    data = request.get_data()
    json_data = json.loads(data)
    print(json_data)
    priority = json_data['priority']
    title = json_data['title']
    enter = json_data['enter']
    outs = json_data['outs']
    updated_by = json_data['updated_by']
    if 'id' in json_data.keys():
        _id = json_data['id']
        if db.query_db("select * from cases where id='{}'".format(_id)) is not None:
            # The WHERE clause must target the case id, not the user id.
            _sql = "update cases set priority='%s',title='%s',enter='%s',outs='%s',updated_by=%d where id=%d" % \
                   (priority, title, enter, outs, updated_by, int(_id))
            e = db.change_db(_sql)
            if e:
                return jsonify(code=-1, message=u"operation failed")
            return jsonify(code=0, message=u"updated successfully")
    # Insert a new case
    ctime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Six columns need six placeholders (the original was one short).
    _sql = '''insert into cases (priority,title,enter,outs,created,created_by)
              value ('%s', '%s', '%s', '%s', '%s', %d)''' \
           % (priority, title, enter, outs, ctime, userid)
    e = db.change_db(_sql)
    if e:
        return jsonify(code=-1, message=u"operation failed")
    return jsonify(code=0, message=u"success")


@cases.route('/cases/list', methods=['GET'])
@login_required
def cases_list():
    """Fetch the case data"""
    # print('userid:', userid)
    print(request.args)
    i_id = int(request.args.get("i_id"))
    # pageSize = int(request.args.get("pageSize"))
    # pageNum = int(request.args.get("currentPage"))
    data = []
    database = db.query_db("select id,priority,title,enter,outs,result from cases where i_id=%d;" % i_id)
    for i in database:
        temp = {}
        temp['id'] = i[0]
        temp['priority'] = i[1]
        temp['title'] = i[2]
        temp['enter'] = i[3]
        temp['outs'] = i[4]
        temp['result'] = i[5]
        data.append(temp)
    return jsonify(code=0, message=u"success", data=data)


@cases.route('/cases/delete', methods=['POST'])
@login_required
def delete_cases():
    data = request.get_data()
    json_data = json.loads(data)
    print(json_data)
    _id = json_data['id']
    _sql = "delete from cases where id=%d" % _id
    e = db.change_db(_sql)
    if e:
        return jsonify(code=-1, message=u"operation failed")
    return jsonify(code=0, message=u"success")


@cases.route('/cases/create', methods=['POST'])
@login_required
def create_case():
    userid = verify_token()
    data = request.get_data()
    json_data = json.loads(data)
    i_ids = json_data['i_id']
    print(i_ids)
    for i_id in i_ids:
        db_data = db.query_db(
            "select name,case1,maxlength,minlength,required,options from params where i_id=%d" % i_id
        )
        title = db.query_db("select name from interface where id=%d" % i_id)[0][0]
        module_data = []
        for i in db_data:
            temp = {}
            temp['name'] = i[0]
            temp['case1'] = i[1]
            temp['maxlength'] = i[2]
            temp['minlength'] = i[3]
            temp['required'] = i[4]
            temp['options'] = i[5]
            module_data.append(temp)
        cases_data = CreateCase(module_data, title).get_case()
        db.change_db("delete from cases where i_id=%d" % i_id)
        ctime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        insert_datas = []
        for k in cases_data:
            # print('test case', k)
            insert_data = [i_id]
            for g in k:
                insert_data.append(str(k[g]))
            insert_data.append(ctime)
            insert_data.append(userid)
            insert_data.append('auto')
            insert_datas.append(tuple(insert_data))
        insert_datas = tuple(insert_datas)
        print('————————', insert_datas)
        _sql = "insert into cases (i_id,title,enter,outs,priority,created,created_by,type) " \
               "value (%s,%s,%s,%s,%s,%s,%s,%s)"
        e = db.change_db(_sql, insert_datas)
        print('e', e)
    # Return once every interface id has been processed.
    return jsonify(code=0, message=u"success")


@cases.route('/cases/execute', methods=['POST'])
@login_required
def execute_cases():
    data = request.get_data()
    json_data = json.loads(data)
    print(json_data)
    # keys() must be called; the case id is also selected so results can be written back per case.
    if 'id' in json_data.keys():
        _id = json_data['id']
        cases = db.db_json('cases', 'id=' + str(_id), 'id', 'title', 'enter', 'outs', 'i_id')
        i_id = cases[0]['i_id']
    elif 'i_id' in json_data.keys():
        i_id = json_data['i_id']
        # Filter by the interface id here, not the case id.
        cases = db.db_json('cases', 'i_id=' + str(i_id), 'id', 'title', 'enter', 'outs', 'i_id')
    else:
        return jsonify(code=-1, message=u"parameter error")
    interface = db.db_json('interface', 'id=' + str(i_id), 'methods', 'url', 'login_required')
    interface = interface[0]
    systems = db.db_json('systems', None, 's_key', 'val', 'type')
    url = ''
    headers = {}
    for i in systems:
        if i['s_key'] == 'url':
            url = i['val']
        elif i['s_key'] == 'token':
            headers['token'] = i['val']
        elif i['s_key'] == 'Cookie':
            headers['Cookie'] = i['val']
    for case in cases:
        if interface['methods'] == 'post':
            _result = requests.post(url=url + interface['url'],
                                    json=json.loads(case["enter"].replace("'", '"')),
                                    headers=headers)
        elif interface['methods'] == 'get':
            _result = requests.get(url=url + interface['url'],
                                   params=json.loads(case['enter'].replace("'", '"')),
                                   headers=headers)
        else:
            return jsonify(code=-1, message=u"unsupported request method: " + interface['methods'])
        sql3 = "update cases set result ='%s' where id=%d" % (_result.text, case['id'])
        db.change_db(sql3)
    return jsonify(code=0, message=u"success")


def login():
    url = "http://192.168.3.66:9001/user/login/password"
    data = {
        "mobile": "18111111111",
        "password": "Sulongfei@123456",
        "mobileAraeCode": "+86",
        "regType": 0,
        "email": ""
    }
    response = requests.post(url=url, json=data)
    # print('request headers', response.request.headers)
    # print('request body', response.request.body)
    # print('response headers', response.headers)
    cookie = response.headers['Set-Cookie']
    cookie = cookie.split(';', 1)[0]
    token = response.json()['data']['token']
    _sql = "update systems set val='%s' where s_key='Cookie'" % cookie
    db.change_db(_sql)
    _sql0 = "update systems set val='%s' where s_key='token'" % token
    db.change_db(_sql0)


if __name__ == '__main__':
    login()
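For reference, a minimal client-side sketch of exercising /cases/update. The host, port, and token header are assumptions (whatever @login_required actually checks lives in common.token_method), and the payload keys mirror exactly what the handler reads:

import json
import requests

BASE = "http://127.0.0.1:5000"            # hypothetical local deployment
HEADERS = {"token": "<your-auth-token>"}  # hypothetical auth header

payload = {
    "priority": "P1",
    "title": "login with valid credentials",
    "enter": "{'mobile': '18111111111'}",
    "outs": "{'code': 0}",
    "updated_by": 1,
    "id": 42,   # omit "id" to insert a new case instead of updating
}
# The handler parses the raw request body itself, so send a JSON string.
resp = requests.post(BASE + "/cases/update", data=json.dumps(payload), headers=HEADERS)
print(resp.json())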
UTF-8
Python
false
false
6,889
py
19
cases.py
17
0.557508
0.547468
0
201
32.696517
129
Amanikashema/data_type2
4,294,967,318,563
56b3385eb806bb380b6c9f1318f1f91839a4b457
7e959170963990a0d65f322d9f34fc098c65ff03
/data_type2.py
312f594894134deb291a1515ac0163806469e272
[]
no_license
https://github.com/Amanikashema/data_type2
7def61cce786fd1858a7e7f631ec8d0c89270132
0e6b157de1ece1b94cb7ba4b6d94476dad719a36
refs/heads/master
2021-03-08T03:20:57.207886
2020-03-10T13:51:50
2020-03-10T13:51:50
246,313,414
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
my_list = [56, 78, 34, 21, 56, 34, 125, 45, 89, 75, 12, 56]
my_list.sort()
print(my_list)

total = sum(my_list)  # using the built-in sum() to add up the list
print("The sum of all of the elements is:", total)
print("The smallest number in the list is:", min(my_list))
print("The largest number in the list is:", max(my_list))

my_list = list(set(my_list))
print("List after removing duplicate elements", my_list)
UTF-8
Python
false
false
397
py
1
data_type2.py
1
0.70529
0.642317
0
13
29.153846
60
iammanoj/PythonML
8,632,884,313,131
be85317e906b4b4410000c5ff9368d70bf30e9aa
71a6d0d09329be51c25b8d10be03ded4dd3f9f02
/ChkPalindrome.py
b578516dfa4fe981a72672ca0746b3761995b5f8
[]
no_license
https://github.com/iammanoj/PythonML
3301f099271a072d6bb8d8ac9b92d3387fae8f88
0d0f67dea8c26e2cb389aa157fdc6b41d53f0f1f
refs/heads/master
2021-01-02T09:01:31.154236
2015-05-17T22:34:27
2015-05-17T22:34:27
34,771,477
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/Users/manoj_mohan/anaconda/bin/python
############################################
# Program Name : Check Palindrome Words
# Description  : This program helps determine any combination of words from a given list of words if they can be joined together to form a palindrome.
# Coder        : Manoj Mohan
# Create Date  : 05/11/2015
# Mod Log      :
#
#
############################################

## Import libraries

words = ["bat", "tab", "cat", "tar", "rat", "ough", "stuff"]

# Compare every word with every other word; if one is the reverse of the
# other, the pair joins into a palindrome (e.g. "bat" + "tab").
for item in words[:]:  # iterate over a copy so removing items is safe
    for other in words:
        if item != other and item == other[::-1]:
            print(item, other)
            words.remove(item)
            break
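As a side note, the quadratic scan above can be replaced by set lookups; a hypothetical O(n) rewrite (not part of the original exercise) that finds the same pairs:

words = ["bat", "tab", "cat", "tar", "rat", "ough", "stuff"]
seen = set(words)
# A word pairs into a palindrome exactly when its reverse is also in the list.
pairs = {tuple(sorted((w, w[::-1]))) for w in words if w[::-1] in seen and w != w[::-1]}
print(pairs)  # {('bat', 'tab'), ('rat', 'tar')}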
UTF-8
Python
false
false
779
py
5
ChkPalindrome.py
4
0.504493
0.49294
0
23
32.782609
165
Karl-Horning/python-3-bootcamp
5,265,629,927,481
421e1b7d38ebef96563b479773d571fae5a8f753
b1b376358edef6faf1ba1c5559e7ae757c1dd8a9
/section_16_tuples_and_sets/144_set_comprehension_and_recap.py
13f08a7198a9b6a7d65a984d94e0db3c6f42273c
[]
no_license
https://github.com/Karl-Horning/python-3-bootcamp
e3894003310f123e9f3e040a490687ee9ff40cc9
f78d1cc5c6a42539f724735d83bd74f77f6653f6
refs/heads/master
2020-03-24T15:22:14.566141
2018-07-29T19:23:23
2018-07-29T19:23:23
142,787,548
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
print({x**2 for x in range(10)})
# {0, 1, 64, 4, 36, 9, 16, 49, 81, 25}

print({char.upper() for char in 'hello'})
# {'E', 'H', 'L', 'O'}
UTF-8
Python
false
false
137
py
67
144_set_comprehension_and_recap.py
67
0.49635
0.357664
0
5
26.6
41
tedneward/Demos
19,585,050,877,506
fd4d49000c2090df01db262e57962878697ac037
64e3f2b8d6abff582d8dff2f200e0dfc708a5f4b
/2019/VSLiveBoston/Py/demo.py
9aef065bf91f0261ed203b3181c5326c6667d76c
[]
no_license
https://github.com/tedneward/Demos
a65df9d5a0390e3fdfd100c33bbc756c83d4899e
28fff1c224e1f6e28feb807a05383d7dc1361cc5
refs/heads/master
2023-01-11T02:36:24.465319
2019-11-30T09:03:45
2019-11-30T09:03:45
239,251,479
0
0
null
false
2023-01-07T14:38:21
2020-02-09T05:21:15
2020-02-10T03:44:36
2023-01-07T14:38:20
217,845
0
0
74
Java
false
false
print("Hello Boston") class Attendee: def sayHello(): print("Hello")
UTF-8
Python
false
false
87
py
462
demo.py
289
0.574713
0.574713
0
5
15.4
22
linea-it/lna
19,567,871,022,832
0602a6c03af3db06fbdcb7858560a46c73afbd24
b1ff1b8920f8bee4d9fdaad938b25daedc51f7ad
/backend/lna/migrations/0007_auto_20190211_1324.py
e92ef927d26c21a9666fe3029f3a102e16aa0b87
[]
no_license
https://github.com/linea-it/lna
141d051c8b3e3c061ef2016ee48276a4538ddc88
dd7c82059c41180d584bd8b201ea2d843878ef8e
refs/heads/master
2022-12-17T12:29:04.291593
2020-07-01T19:27:21
2020-07-01T19:27:21
148,836,030
0
0
null
false
2022-12-09T13:57:18
2018-09-14T20:00:05
2020-07-01T20:40:59
2022-12-09T13:57:17
430,971
0
0
26
JavaScript
false
false
# Generated by Django 2.1.5 on 2019-02-11 13:24

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('lna', '0006_exposure_target'),
    ]

    operations = [
        migrations.AddIndex(
            model_name='exposure',
            index=models.Index(fields=['filename'], name='lna_exposur_filenam_1f8e84_idx'),
        ),
        migrations.AddIndex(
            model_name='exposure',
            index=models.Index(fields=['date'], name='lna_exposur_date_2a310b_idx'),
        ),
        migrations.AddIndex(
            model_name='exposure',
            index=models.Index(fields=['date_obs'], name='lna_exposur_date_ob_b3f2c1_idx'),
        ),
        migrations.AddIndex(
            model_name='exposure',
            index=models.Index(fields=['target'], name='lna_exposur_target_4d256e_idx'),
        ),
        migrations.AddIndex(
            model_name='exposure',
            index=models.Index(fields=['ra', 'dec'], name='lna_exposur_ra_4a1ba3_idx'),
        ),
    ]
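A migration like this is normally produced by declaring indexes on the model's Meta. A sketch of what the Exposure model's Meta might look like; the field names come from the migration, but the field types are assumptions:

from django.db import models

class Exposure(models.Model):
    filename = models.CharField(max_length=255)   # field types are assumptions
    date = models.DateField()
    date_obs = models.DateTimeField()
    target = models.CharField(max_length=100)
    ra = models.FloatField()
    dec = models.FloatField()

    class Meta:
        indexes = [
            models.Index(fields=['filename']),
            models.Index(fields=['date']),
            models.Index(fields=['date_obs']),
            models.Index(fields=['target']),
            models.Index(fields=['ra', 'dec']),
        ]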
UTF-8
Python
false
false
1,047
py
45
0007_auto_20190211_1324.py
39
0.572111
0.536772
0
33
30.727273
91
kiramishima/csv_pokeapi
10,857,677,350,067
4c55efeb527bf1dccb0783adc71987022f9b1dc2
4e0fce17cf26f8661c2bafa70c1b041ac41285f5
/app.py
66fa0c74120761ec9b1524a9d251ebb27953f950
[]
no_license
https://github.com/kiramishima/csv_pokeapi
3cc69cd00e99c790155bb7ebae1a3c793cc32603
1f65e2ddd64ca8545a03039b775a878e7ac95536
refs/heads/master
2023-09-03T20:24:29.109257
2021-10-15T17:37:02
2021-10-15T17:37:02
417,278,128
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
import math

from flask import Flask, jsonify, request, render_template
import pandas as pd

app = Flask(__name__)

# Load CSV
app.data = pd.read_csv("./data/pokemon.csv", header=0, sep='\t',
                       names=['id', 'name', 'type_1', 'type_2', 'total', 'hp', 'atk', 'def',
                              'sp_atk', 'sp_def', 'speed', 'generation', 'legendary'])


@app.route('/')
def home():
    """This endpoint returns only the swagger documentation"""
    return render_template('index.html')


@app.route('/api/v1/pokemon')
def all():
    """This endpoint returns a paginated list of pokemon"""
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)
    resp = pagination(app.data, page, per_page)
    return jsonify(resp)


def pagination(df, start_page=1, per_page=15):
    """Pagination helper"""
    pagesize = per_page
    page = start_page - 1
    max_pages = math.ceil(df.shape[0] / pagesize)
    return {
        'total_records': df.shape[0],
        'current_page': page + 1,
        'max_pages': max_pages,
        'per_page': pagesize,
        'data': json.loads(df.iloc[page * pagesize: (page + 1) * pagesize].to_json(orient='records', force_ascii=False))
    }


@app.route('/api/v1/pokemon/<poke_id>', methods=['GET'])
def find(poke_id):
    """This endpoint filters the dataset by id or by pokemon's name and returns one result"""
    pfind = app.data[(app.data['id'] == poke_id) | (app.data['name'] == poke_id)]
    dtjson = json.loads(pfind.to_json(orient='records', force_ascii=False))
    resp = {'status': True, 'data': dtjson}
    return jsonify(resp)


@app.route('/api/v1/pokemon', methods=['POST'])
def create_pokemon():
    """Endpoint to create a new pokemon in the dataset"""
    form = request.form
    print(form)
    pokemon = {"id": form.get('id'), "name": form.get('name'), "type_1": form.get('type_1'),
               "type_2": form.get('type_2'), "total": form.get('total'), "hp": form.get('hp'),
               "atk": form.get('atk'), "def": form.get('def'), "sp_atk": form.get('sp_atk'),
               "sp_def": form.get('sp_def'), "speed": form.get('speed'),
               "generation": form.get('generation'), "legendary": form.get('legendary')}
    # print(pokemon)
    new_row = pd.Series(pokemon)
    app.data = app.data.append(new_row, ignore_index=True)
    resp = {'status': True, 'data': pokemon}
    return jsonify(resp)


@app.route('/api/v1/pokemon/<poke_id>', methods=['POST'])
def update_pokemon(poke_id):
    """Update pokemon endpoint"""
    form = request.form
    # build response
    pokemon = {"id": form.get('id'), "name": form.get('name'), "type_1": form.get('type_1'),
               "type_2": form.get('type_2'), "total": form.get('total'), "hp": form.get('hp'),
               "atk": form.get('atk'), "def": form.get('def'), "sp_atk": form.get('sp_atk'),
               "sp_def": form.get('sp_def'), "speed": form.get('speed'),
               "generation": form.get('generation'), "legendary": form.get('legendary')}
    pfind = app.data.loc[(app.data['id'] == poke_id) | (app.data['name'] == poke_id)]
    pfind = app.data.loc[pfind.index, :]
    # Assign through bracket indexing: attribute assignment (pfind.name = ...) would
    # set a DataFrame attribute instead of updating the column.
    pfind['id'] = form.get('id')
    pfind['name'] = form.get('name')
    pfind['type_1'] = form.get('type_1')
    pfind['type_2'] = form.get('type_2')
    pfind['total'] = form.get('total')
    pfind['hp'] = form.get('hp')
    pfind['atk'] = form.get('atk')
    pfind['def'] = form.get('def')
    pfind['sp_atk'] = form.get('sp_atk')
    pfind['sp_def'] = form.get('sp_def')
    pfind['speed'] = form.get('speed')
    pfind['generation'] = form.get('generation')
    pfind['legendary'] = form.get('legendary')
    # print(pfind)
    app.data.update(pfind)
    resp = {'status': True, 'message': 'Pokemon has been updated', 'data': pokemon}
    return jsonify(resp)


@app.route('/api/v1/pokemon/<poke_id>', methods=['DELETE'])
def delete_pokemon(poke_id):
    """Endpoint for deleting a pokemon"""
    # find the target
    pfind = app.data.loc[(app.data['id'] == poke_id) | (app.data['name'] == poke_id)]
    # process to delete
    app.data.drop(index=pfind.index, inplace=True)
    resp = {'status': True, 'message': 'Pokemon has been deleted'}
    return jsonify(resp)


if __name__ == '__main__':
    app.run()
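A quick smoke test of the pagination endpoint might look like the following; the host and port are assumptions (Flask's development defaults), while the query parameters and response keys match the handler above:

import requests

base = "http://127.0.0.1:5000"  # hypothetical local deployment
r = requests.get(base + "/api/v1/pokemon", params={"page": 1, "per_page": 5})
body = r.json()
print(body["total_records"], body["max_pages"])
for row in body["data"]:
    print(row["id"], row["name"])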
UTF-8
Python
false
false
4,320
py
4
app.py
1
0.580556
0.57338
0
123
34.121951
120
893202527/JK
17,343,077,975,191
ec5dce0013a7402996c251e043c6882a5e3b05af
326269d5c3740bed8ca5ef65aeab35ada4ced727
/MyDjango/xycDemo/models.py
aaee15810df7422dc28fa68b4b9b644b8308402b
[]
no_license
https://github.com/893202527/JK
43115fbc08b119de5c49ac4b470e4cbcbcdc917b
0080f3d349093ce771df94f70edd6010a9fd3ff7
refs/heads/master
2022-11-12T04:48:34.174950
2019-09-02T09:12:17
2019-09-02T09:12:17
180,317,671
1
1
null
false
2022-11-01T23:31:18
2019-04-09T08:11:31
2019-09-02T09:12:45
2019-09-02T09:12:43
50,926
0
1
1
Python
false
false
import datetime
import json

from django.db import models
from django.utils import timezone


# Create your models here.
class User(models.Model):
    nickName = models.CharField(max_length=20)
    create_time = models.DateTimeField(auto_now_add=True)
    Modify_time = models.DateTimeField(auto_now=True)
    phoneNumber = models.CharField(max_length=11, unique=True)
    username = models.CharField(max_length=20)
    age = models.CharField(max_length=3)
    sex = (
        ('male', 'Male'),
        ('female', 'Female'),
    )
    password = models.CharField(max_length=100)

    def __str__(self):
        smart = {'phoneNumber': self.phoneNumber,
                 'nickName': self.nickName,
                 'age': self.age,
                 'sex': self.sex,
                 'username': self.username,
                 'password': self.password}
        return json.dumps(smart)


class user_info(models.Model):
    userid = models.ForeignKey(User, on_delete=models.CASCADE)
    ip = models.CharField(max_length=100)
    login_time = models.DateTimeField(auto_now=True)
    login_times = models.IntegerField(auto_created=True)  # intended as auto-increment; AUTO_INCREMENT=100 would start counting from 100
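The sex tuple above is declared on the class but never attached to a field; presumably it was meant to be a choices list. A sketch of how it would normally be wired up, where the field name, max_length, and default are assumptions:

from django.db import models

SEX_CHOICES = (
    ('male', 'Male'),
    ('female', 'Female'),
)

class User(models.Model):
    # ... other fields as in the model above ...
    # A CharField constrained to the two choice values.
    sex = models.CharField(max_length=6, choices=SEX_CHOICES, default='male')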
UTF-8
Python
false
false
1,162
py
23
models.py
20
0.652632
0.635965
0
36
30.583333
86
sts-sadr/Hands-of-Machine-Learning
13,417,477,874,901
e7c9458e97389ed8156fe3b2628be7c68e0f3ef2
cefc8caac20ec430265dfbe56b3d9b8ab1acb34d
/MNIST.py
3d961929ef7ee20b2fcc5f9c0e6e2c162a4a99a6
[]
no_license
https://github.com/sts-sadr/Hands-of-Machine-Learning
ca9d9bbf12d141928210913ee9567f7f2b436cdf
16d3c64a43d11170f24ab46f407a987f7fc46623
refs/heads/master
2021-01-07T08:16:53.190991
2019-09-03T18:55:24
2019-09-03T18:55:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue Sep  3 13:02:32 2019

@author: jered.willoughby
"""
# Load libraries
from sklearn.datasets import fetch_openml
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import f1_score

mnist = fetch_openml('mnist_784', version=1)
mnist.keys()

# Descriptive statistics
X, y = mnist["data"], mnist["target"]
X.shape
y.shape

some_digit = X[0]
some_digit_image = some_digit.reshape(28, 28)

# Select 0 indexed image from the mnist data and plot it
plt.imshow(some_digit_image, cmap="binary")
plt.axis("off")
plt.show()

# validate from the target set
y[0]  # this is a string value

# Change datatype to integer
y = y.astype(np.uint8)

# Create the testing and training sets
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]

# Now we need to determine what classifier to use. Let's start with a binary
# target vector classification
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)

# Binary classifier = stochastic gradient descent SGDClassifier
from sklearn.linear_model import SGDClassifier

sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([some_digit])

# Cross Validation
skfolds = StratifiedKFold(n_splits=3, random_state=42)

for train_index, test_index in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf)
    X_train_folds = X_train[train_index]
    y_train_folds = y_train_5[train_index]
    X_test_fold = X_train[test_index]
    y_test_fold = y_train_5[test_index]

    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_fold)
    n_correct = sum(y_pred == y_test_fold)
    print(n_correct / len(y_pred))

# Determine cross validation score - 3 folds
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")

# Prediction set selection from cross val
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)

# Confusion matrix from prior variable set
confusion_matrix(y_train_5, y_train_pred)

# Performance metric - precision + recall score
precision_score(y_train_5, y_train_pred)
recall_score(y_train_5, y_train_pred)

# We can combine the two into the F1 score - harmonic mean
f1_score(y_train_5, y_train_pred)

# Note that there is a way to get the optimal threshold: decision_function()
# method, which returns a score for each instance, and then use any threshold
# you want to make predictions based on those scores.
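Following up on that closing note, a short sketch of threshold tuning with decision_function, continuing from the variables defined above. Picking the lowest threshold that reaches 90% precision is an arbitrary target chosen for illustration:

from sklearn.model_selection import cross_val_predict
from sklearn.metrics import precision_recall_curve

# Raw decision scores instead of hard 0/1 predictions
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)

# Lowest threshold giving at least 90% precision
threshold_90 = thresholds[np.argmax(precisions >= 0.90)]
y_train_pred_90 = (y_scores >= threshold_90)
print(precision_score(y_train_5, y_train_pred_90), recall_score(y_train_5, y_train_pred_90))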
UTF-8
Python
false
false
2,815
py
2
MNIST.py
2
0.71865
0.694139
0
85
31.141176
77
pydi0415/git_zeroo
17,145,509,487,499
f69cf63b9458a5905c410d34f1cb31ddebafc104
0f0a2ae1c525a3f6ab4ceb9652e3a95abb42fe0d
/python/assignment/Basic pgms.py
1b566105fc64540b2ea620e9037fabaa6d2d7dd6
[]
no_license
https://github.com/pydi0415/git_zeroo
c47a21193f32651a29f10c5fec461bcc6f3868cb
0d50e29a257041619031a39d7abe252db59ea019
refs/heads/main
2023-03-14T13:02:06.166224
2021-02-26T14:26:55
2021-02-26T14:26:55
342,595,855
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
print("prabhakar\nprasu") print("----------------------------") print("prabhakar\tprasu") print("----------------------------") #print("prabhakar"prabha"prasu" )(error) print('''Hi\n This is penchala\tprabhakr From Ap''') print("----------------------------") print("s@athya\"Techonlogy\"") print("----------------------------") print("Heloo 'Prasu' Iam sharing my \"python meterial\" Check once " "\nIn this\tyear i get a \"job\" ") print("----------------------------") print("Prabhakar@0415"+"123456789") print("----------------------------") print(20%3) print("----------------------------") print("10"+"20") print("----------------------------") print("prabhakar","prasu",'BestFriends','''From school standerd to Still know''') print("----------------------------") print("python""-""Django") print("----------------------------") print("\n","Employee","_","UnEmployee") print("----------------------------") #print("prabhakar"+3) print("3+3+""Prabahakar") print("----------------------------") #print("7"-2) print("----------------------------") print("Prabhakar\n\t\"Penchala") print("----------------------------") print("----------------------------") print("**\"prabha\"kar**") print("\n--python--") print("----------------------------") print("\t\tPrabhakar\tpydi") print("\t\t==========\t====") print("----------------------------") studentname='Prabhakar' print("welcome",studentname) print("----------------------------") #o1=int(input("Enter first number:")) #no2=int(input("Enter second number:")) #print("adition=",no1+no2) #print("sub=",no1-no2) #print("mul=",no1*no2) #print("div=",no1/no2) #print("mod=",no1%no2) #print("FlorDiv=",no1//no2) #print("Exponential=",no1**no2) #print("----------------------------") #x='prabha.415' #print(x) #print(type(x)) print("----------------------------") #Fname=input("Enter first name:") #Lname=input("Enter last name:") #print(Fname + Lname) print("----------------------------") #x=input("Enter no:") #print(x) #idno=int(input("Student IDNO:")) #name=input("Name:") #m1=int(input("marks1:")) #m2=int(input("marks2:")) #m3=int(input("marks3:")) #print(idno) #print(name) #print("Total marks=",m1+m2+m3) #print("Avgmarks=",m1+m2+m3/3) print("----------------------------") x=5 if x>0: print("+ve number") if x<0: print("-ve number") print("----------------------------")
UTF-8
Python
false
false
2,348
py
17
Basic pgms.py
16
0.456559
0.431005
0
77
29.480519
81
ritwikbera/GazeAtari
17,884,243,858,487
a4c31c80d6125de528aee80bd1ebdabcf6736155
4cbe3a5dfc11227ac85e77980e3f9eb0cc20aa13
/train.py
0d33bdb47f07239159259b6c23716ae48c8ec896
[]
no_license
https://github.com/ritwikbera/GazeAtari
42e36ab90b1b3fd9943fa5ad2e1b051731827666
7d9840474e91e41422dd551e20c752afe82edd10
refs/heads/master
2022-08-21T07:04:14.542815
2020-05-26T01:51:36
2020-05-26T01:51:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import torch
from torch import nn
from torch.optim import Adam, Rprop
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import json
import os
from math import ceil

from ignite.engine import Events, Engine
from ignite.metrics import Loss, RunningAverage, Accuracy, MeanSquaredError
from ignite.utils import setup_logger
from ignite.handlers import ModelCheckpoint
# from ignite.contrib.handlers.tqdm_logger import ProgressBar
from tqdm import tqdm

import models
import utils
from models import *
from utils import *

current_dir = os.getcwd()
torch.manual_seed(0)
np.random.seed(0)


def run(config):
    train_loader = get_instance(utils, 'dataloader', config, 'train')
    val_loader = get_instance(utils, 'dataloader', config, 'val')

    model = get_instance(models, 'arch', config)
    model = init_model(model, train_loader)
    model, device = ModelPrepper(model, config).out

    loss_fn = get_instance(nn, 'loss_fn', config)

    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = get_instance(torch.optim, 'optimizer', config, trainable_params)

    writer = create_summary_writer(config, model, train_loader)
    batch_size = config['dataloader']['args']['batch_size']

    if config['mode'] == 'eval' or config['resume']:
        model.load_state_dict(torch.load(config['ckpt_path']))

    epoch_length = int(ceil(len(train_loader) / batch_size))

    desc = "ITERATION - loss: {:.2f}"
    pbar = tqdm(initial=0, leave=False, total=epoch_length, desc=desc.format(0))

    def process_batch(engine, batch):
        inputs, outputs = func(batch)
        model.train()
        model.zero_grad()
        optimizer.zero_grad()
        preds = model(inputs)
        loss = loss_fn(preds, outputs.to(device))
        a = list(model.parameters())[0].clone()
        loss.backward()
        optimizer.step()

        # check if training is happening
        b = list(model.parameters())[0].clone()
        try:
            assert not torch.allclose(a.data, b.data), 'Model not updating anymore'
        except AssertionError:
            plot_grad_flow(model.named_parameters())

        return loss.item()

    def predict_on_batch(engine, batch):
        inputs, outputs = func(batch)
        model.eval()
        with torch.no_grad():
            y_pred = model(inputs)

        return inputs, y_pred, outputs.to(device)

    trainer = Engine(process_batch)
    trainer.logger = setup_logger("trainer")
    evaluator = Engine(predict_on_batch)
    evaluator.logger = setup_logger("evaluator")

    if config['task'] == 'actionpred':
        Accuracy(output_transform=lambda x: (x[1], x[2])).attach(evaluator, 'val_acc')

    if config['task'] == 'gazepred':
        MeanSquaredError(output_transform=lambda x: (x[1], x[2])).attach(evaluator, 'val_MSE')

    RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')

    training_saver = ModelCheckpoint(config['checkpoint_dir'], filename_prefix='checkpoint_' + config['task'],
                                     n_saved=1, atomic=True, save_as_state_dict=True,
                                     create_dir=True, require_empty=False)

    trainer.add_event_handler(Events.EPOCH_COMPLETED, training_saver, {'model': model})

    @trainer.on(Events.ITERATION_COMPLETED)
    def tb_log(engine):
        pbar.desc = desc.format(engine.state.output)
        pbar.update(1)
        writer.add_scalar('training/avg_loss', engine.state.metrics['loss'], engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def print_trainer_logs(engine):
        pbar.refresh()
        avg_loss = engine.state.metrics['loss']
        tqdm.write('Trainer Results - Epoch {} - Avg loss: {:.2f} \n'.format(engine.state.epoch, avg_loss))
        viz_param(writer=writer, model=model, global_step=engine.state.epoch)
        pbar.n = pbar.last_print_n = 0

    @evaluator.on(Events.EPOCH_COMPLETED)
    def print_result(engine):
        try:
            print('Evaluator Results - Accuracy {} \n'.format(engine.state.metrics['val_acc']))
        except KeyError:
            print('Evaluator Results - MSE {} \n'.format(engine.state.metrics['val_MSE']))

    @evaluator.on(Events.ITERATION_COMPLETED)
    def viz_outputs(engine):
        visualize_outputs(writer=writer, state=engine.state, task=config['task'])

    if config['mode'] == 'train':
        trainer.run(train_loader, max_epochs=config['epochs'], epoch_length=epoch_length)
        pbar.close()

    evaluator.run(val_loader, max_epochs=1, epoch_length=int(ceil(len(val_loader) / batch_size)))

    writer.flush()
    writer.close()


if __name__ == "__main__":
    config = json.load(open('config.json'))
    run(config)
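For reference, a config.json with the keys run() actually reads might look like the sketch below, expressed as a Python dict. Every value is an assumption; only the key names come from the code, and the "type"/"args" nesting for arch, loss_fn, and optimizer follows the usual get_instance(module, name, config, ...) convention whose exact schema lives in utils:

# Hypothetical config.json contents; values are placeholders.
config = {
    "mode": "train",          # or "eval"
    "resume": False,
    "ckpt_path": "checkpoints/checkpoint_gazepred_model.pth",
    "checkpoint_dir": "checkpoints/",
    "task": "gazepred",       # or "actionpred"
    "epochs": 10,
    "dataloader": {"type": "GazeDataLoader", "args": {"batch_size": 32}},
    "arch": {"type": "GazeNet", "args": {}},
    "loss_fn": {"type": "MSELoss", "args": {}},
    "optimizer": {"type": "Adam", "args": {"lr": 1e-4}},
}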
UTF-8
Python
false
false
4,902
py
21
train.py
18
0.631783
0.628519
0
148
32.128378
107
landont3/GolfLeagueScoring
7,198,365,228,995
4437ae18a2721e62f6726a88cc3119e25e44f560
48075e439cb159a1eddc7a637b3e955a012ee214
/league/admin.py
5e1423f65df502d5cb49ed6bcad60ab69fb514c2
[]
no_license
https://github.com/landont3/GolfLeagueScoring
e503154ff84d907597393b86c1d4e4039ddfca9e
c71ac20b1a26c016db33003e87a8f4a8dc64f0f5
refs/heads/master
2021-03-22T01:11:36.267737
2018-03-08T12:21:26
2018-03-08T12:21:26
122,187,660
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin

from .models import League, Division, Season, SeasonSettings, Course, Nine, Hole


@admin.register(SeasonSettings)
class SeasonSettingsAdmin(admin.ModelAdmin):
    list_display = ('season', 'handicap_method', 'max_score_to_par', 'max_handicap')
    list_editable = ('handicap_method', 'max_score_to_par', 'max_handicap')


admin.site.register(League)
admin.site.register(Division)
admin.site.register(Season)
admin.site.register(Course)
admin.site.register(Nine)
admin.site.register(Hole)
UTF-8
Python
false
false
523
py
31
admin.py
21
0.762906
0.762906
0
17
29.764706
84
VR-Scott/practice_team_08
10,256,381,926,421
0bf8ca6bdc4474f497844ace2645eb0ba8a44097
511583a2223f86e5028839763cb867bbc9931ba7
/encryption.py
cc6984c73aaa934b9ec04d1d93ac30f921c4be32
[]
no_license
https://github.com/VR-Scott/practice_team_08
005384695bd5a84086fbbac651f5e216314951d7
4f36ba4cc767e3bf9a77579c1569be1034a4846c
refs/heads/master
2023-01-30T14:31:08.745287
2020-12-11T14:39:46
2020-12-11T14:39:46
308,270,721
0
1
null
false
2020-11-16T07:18:31
2020-10-29T08:58:18
2020-11-13T08:30:26
2020-11-16T07:18:31
7,262
0
0
0
Python
false
false
import bcrypt


def encrypt_password(password):
    """
    Encrypt a password with a randomly generated salt then a hash.

    :param password: The password to encrypt in clear text.
    :return: The encrypted password as a unicode string.
    """
    encoded_password = password.encode('utf8')
    cost_rounds = 4
    random_salt = bcrypt.gensalt(cost_rounds)
    hashed_password = bcrypt.hashpw(encoded_password, random_salt).decode('utf8', 'strict')
    return hashed_password


def check_password(password, password_hash):
    """
    Check a password against its encrypted hash for a match.

    :param password: the password to check in clear text. (Unicode)
    :param password_hash: The encrypted hash to check against. (Unicode)
    :return: Whether the password and the hash match
    """
    encoded_password = password.encode('utf8')
    encoded_password_hash = password_hash.encode('utf8')
    password_matches = bcrypt.checkpw(encoded_password, encoded_password_hash)
    return password_matches


if __name__ == '__main__':
    test_password = 'doobeedoo'
    hashed_test_password = encrypt_password(test_password)
    print(f'hashed_password: {hashed_test_password}')
    password_matches_hash = check_password(test_password, hashed_test_password)
    print(f'password matches hash? {password_matches_hash}')
UTF-8
Python
false
false
1,322
py
19
encryption.py
12
0.711044
0.707262
0
34
37.911765
91
doublevcodes/pyhmrc
5,574,867,575,817
33cc9e74364e9c6a781dcccbae6ae8c1f822d1e7
7c5e543762f1a7c9ac134b7e0ce084724e8e113e
/pyhmrc/hello/__init__.py
1be72be2be386b6f8329c2ea5f26ad858205bd54
[]
no_license
https://github.com/doublevcodes/pyhmrc
5ee44ed968357b9db7ae4ce9fb91085651192a28
fdc5eff0d14332b3f718060fc33316952a0e411b
refs/heads/master
2023-04-02T22:34:30.769236
2021-04-09T21:35:17
2021-04-09T21:35:17
356,403,147
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from .hello import HelloClient
UTF-8
Python
false
false
30
py
5
__init__.py
4
0.866667
0.866667
0
1
30
30
TopologicLogic/Convusing
2,851,858,284,551
b9ac8f3a82186ee4c186b6918b5dc76215f9b016
63f21c66119a6b3a752a8dcd8377e0c512ec4cc2
/DataPrep.py
18c40cf929986974a66f9e97611e4490875bdd62
[]
no_license
https://github.com/TopologicLogic/Convusing
3cca2e52a4c141d00988b78bc09630501f544bfc
f8445ea81828f53dd91b9d1cccc0a28ac7f1dbf3
refs/heads/master
2022-07-17T04:31:11.199671
2022-05-29T03:35:11
2022-05-29T03:35:11
254,484,222
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 20:19:35 2018

@author: Dev
"""

import random
import pandas as pan
import numpy as np
import os.path
import csv
import sys
# import ctypes  # An included library with Python install.
from copy import deepcopy
from stockstats import StockDataFrame as sdf
from googlefinance.client import get_price_data  # , get_prices_data, get_prices_time_data

data_lookup = {}


def find_intervals(projection_range, row_start, sdfdata, interval_min, interval_max):
    interval_index = []
    # tshape = sdfdata.iloc[:, 0:].values.shape
    # if row_start >= tshape[0]: return interval_index
    tdata = sdfdata['close_-' + str(projection_range + 1) + '_r'].as_matrix()
    # print(str(tdata))
    for i in range(row_start, len(tdata)):
        if tdata[i] >= interval_min and tdata[i] <= interval_max:
            interval_index.append(i)
    return interval_index


def get_indicator_data_at_index(index, data_row_count, sdfdata, indicators, intervals, normalize=True):
    x_data = np.zeros((len(indicators) * len(intervals), data_row_count))
    l = 0
    if normalize:
        for m in range(0, len(indicators)):
            for n in range(0, len(intervals)):
                # A one dimensional array with the indicator/interval data
                tdata = sdfdata[indicators[m] + '_' + intervals[n]].as_matrix()
                dmax = sys.float_info.min
                # Check all of tdata for NaNs, Infinities, and get column max
                for t in range(0, len(tdata)):
                    if not np.isfinite(tdata[t]):
                        tdata[t] = 0
                    if tdata[t] > dmax:
                        dmax = tdata[t]
                for t in range(0, len(tdata)):
                    tdata[t] /= dmax
                q = 0
                for t in range(index - data_row_count, index):
                    x_data[l][q] = tdata[t]
                    q += 1
                l += 1
    else:
        for m in range(0, len(indicators)):
            for n in range(0, len(intervals)):
                tdata = sdfdata[indicators[m] + '_' + intervals[n]].as_matrix()
                q = 0
                for t in range(index - data_row_count, index):
                    x_data[l][q] = tdata[t]
                    if not np.isfinite(x_data[l][q]):
                        x_data[l][q] = 0
                    q += 1
                l += 1
    return x_data


def homogeneous_populate_training(n_classes, batch_count, data_column_count, data_row_count,
                                  projection_range, check_for_zeros=True, track_classes=True, verbose=True):
    global data_lookup

    indicators = ['rsi', 'atr', 'wr', 'vr']
    intervals = ['2', '5', '10', '15', '20', '30', '60']

    x_train = np.zeros((batch_count, data_column_count, data_row_count))
    y_train = np.zeros((batch_count, n_classes), dtype=int)

    symbols = []
    # https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ&render=download
    with open('NASD.csv', newline='') as csvfile:
        r = csv.DictReader(csvfile)
        for row in r:
            symbols.append(row['Symbol'])

    class_max = [-50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50, 10000]
    class_min = [-10000, -50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50]

    avg_class_count = np.floor(batch_count / n_classes)
    class_count = np.zeros(n_classes, dtype=int)
    class_count.fill(avg_class_count)

    diff = batch_count - (avg_class_count * n_classes)
    if diff > 0:
        class_count[10] += diff

    if not os.path.exists('StockData/'):
        os.makedirs('StockData/')

    batch_i = 0

    for z in range(0, n_classes):

        skips = []  # Skipped due to data errors
        nics = []   # Not in class

        if track_classes:
            if os.path.isfile("skips.csv"):
                with open('skips.csv', newline='\n') as csvfile:
                    r = csv.DictReader(csvfile)
                    for row in r:
                        skips.append(row['Symbols'])
            else:
                with open("skips.csv", "w") as text_file:
                    text_file.write("""Symbols""" + "\n")

            if os.path.isfile("nic-" + str(z) + ".csv"):
                with open("nic-" + str(z) + ".csv", newline='\n') as csvfile:
                    r = csv.DictReader(csvfile)
                    for row in r:
                        nics.append(row['Symbols'])
            else:
                with open("nic-" + str(z) + ".csv", "w") as text_file:
                    text_file.write("""Symbols""" + "\n")

            if verbose:
                print("\nClass #" + str(z + 1) + " of " + str(n_classes) +
                      ", Total skips: " + str(len(skips) + len(nics)) + "\n")
        elif verbose:
            print("\nClass #" + str(z) + " of " + str(n_classes))

        symbols_used = []

        k = 0
        while k < class_count[z]:

            tclass = z
            tsymbol = symbols[np.random.randint(0, len(symbols))]

            # Went through all the symbols and couldn't find enough examples,
            # so fill up more default values.
            if len(symbols_used) >= len(symbols):
                tclass = 10

            # Check for stocks to skip
            if track_classes:
                if tsymbol in skips:
                    continue
                if tsymbol in nics:
                    continue

            if tsymbol in symbols_used:
                continue

            symbols_used.append(tsymbol)

            if tsymbol in data_lookup:
                if verbose:
                    print("Loading[" + str(class_min[tclass]) + "%:" + str(class_max[tclass]) + "%, " +
                          "#" + str(k) + "]: " + tsymbol)
                df = data_lookup[tsymbol]
            elif os.path.isfile("StockData/" + tsymbol + ".csv"):
                if verbose:
                    print("Loading[" + str(class_min[tclass]) + "%:" + str(class_max[tclass]) + "%, " +
                          "#" + str(k) + "]: " + tsymbol)
                data_lookup[tsymbol] = pan.read_csv("StockData/" + tsymbol + ".csv", sep=',', header=0, index_col=0)
                df = data_lookup[tsymbol]
            else:
                if verbose:
                    print("Downloading[" + str(class_min[tclass]) + "%:" + str(class_max[tclass]) + "%, " +
                          "#" + str(k) + "]: " + tsymbol)
                param = {
                    'q': tsymbol,  # Stock symbol (ex: "AAPL")
                    'i': "86400",  # Interval size in seconds ("86400" = 1 day intervals)
                    'x': "NASD",   # Stock exchange symbol on which stock is traded (ex: "NASD")
                    'p': "5Y"      # Period (Ex: "1Y" = 1 year)
                }
                # get price data (return pandas dataframe)
                df = get_price_data(param)
                df.to_csv("StockData/" + tsymbol + ".csv")
                data_lookup[tsymbol] = deepcopy(df)

            tshape = df.iloc[:, 0:].values.shape

            if tshape[0] <= data_row_count:
                if verbose:
                    print("Data error: " + tsymbol + ", continuing to next symbol...")
                if track_classes:
                    with open("skips.csv", "a") as text_file:
                        text_file.write(tsymbol + "\n")
                continue

            if tshape[0] < 400:
                if verbose:
                    print("Not enough data for: " + tsymbol + ", continuing to next symbol...")
                if track_classes:
                    with open("skips.csv", "a") as text_file:
                        text_file.write(tsymbol + "\n")
                continue

            # Check for zeros
            if check_for_zeros:
                zero_flag = False
                for row in range(0, tshape[0]):
                    for column in range(0, tshape[1]):
                        v = df.iloc[:, column:].values[row][0]
                        if v <= 0:
                            zero_flag = True
                            break
                    if zero_flag:
                        break
                if zero_flag:
                    if verbose:
                        print("Zeros in: " + tsymbol + ", continuing to next symbol...")
                    if track_classes:
                        with open("skips.csv", "a") as text_file:
                            text_file.write(tsymbol + "\n")
                    continue

            sdfdata = sdf.retype(df)

            indicies = find_intervals(projection_range, data_row_count + projection_range, sdfdata,
                                      class_min[tclass], class_max[tclass])

            if len(indicies) > 0:
                random.shuffle(indicies)
                for i in range(0, len(indicies)):
                    if k < class_count[z] and batch_i < batch_count:
                        # Add data to the batch array
                        x_train[batch_i] = get_indicator_data_at_index(indicies[i] - projection_range,
                                                                       data_row_count, sdfdata,
                                                                       indicators, intervals)
                        y_train[batch_i][tclass] = 1
                        k += 1
                        batch_i += 1
                    else:
                        break
            elif track_classes:
                with open("nic-" + str(z) + ".csv", "a") as text_file:
                    text_file.write(tsymbol + "\n")

            del sdfdata
            del df

        del symbols_used
        del skips
        del nics

    return x_train, y_train


def homogeneous_populate_training2(n_classes, batch_count, data_column_count, data_row_count,
                                   projection_range, check_for_zeros=True, track_classes=True, verbose=True):
    global data_lookup

    indicators = ['rsi', 'atr', 'wr', 'vr']
    intervals = ['2', '5', '10', '15', '20', '30', '60']

    x_train = np.zeros((batch_count, data_column_count, data_row_count))
    y_train = np.zeros((batch_count, n_classes), dtype=int)

    symbols = []
    # https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ&render=download
    with open('NASD.csv', newline='') as csvfile:
        r = csv.DictReader(csvfile)
        for row in r:
            symbols.append(row['Symbol'])

    class_max = [-50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50, 10000]
    class_min = [-10000, -50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50]

    avg_class_count = np.floor(batch_count / n_classes)
    class_count = np.zeros(n_classes, dtype=int)
    class_count.fill(avg_class_count)

    # If the data is uneven, put it in the center.
    diff = batch_count - (avg_class_count * n_classes)
    if diff > 0:
        class_count[10] += diff

    if not os.path.exists('StockData/'):
        os.makedirs('StockData/')

    batch_i = 0

    for z in range(0, n_classes):

        skips = []  # Skipped due to data errors
        nics = []   # Not in class

        if track_classes:
            if os.path.isfile("skips.csv"):
                with open('skips.csv', newline='\n') as csvfile:
                    r = csv.DictReader(csvfile)
                    for row in r:
                        skips.append(row['Symbols'])
            else:
                with open("skips.csv", "w") as text_file:
                    text_file.write("""Symbols""" + "\n")

            if os.path.isfile("nic-" + str(z) + ".csv"):
                with open("nic-" + str(z) + ".csv", newline='\n') as csvfile:
                    r = csv.DictReader(csvfile)
                    for row in r:
                        nics.append(row['Symbols'])
            else:
                with open("nic-" + str(z) + ".csv", "w") as text_file:
                    text_file.write("""Symbols""" + "\n")

            if verbose:
                print("\nClass #" + str(z + 1) + " of " + str(n_classes) +
                      ", Total skips: " + str(len(skips) + len(nics)) + "\n")
        elif verbose:
            print("\nClass #" + str(z) + " of " + str(n_classes))

        random.shuffle(symbols)

        # range() needs the symbol count, not the list itself.
        for k in range(0, len(symbols)):

            tclass = z
            tsymbol = symbols[k]

            # Check for stocks to skip
            if track_classes:
                if tsymbol in skips:
                    continue
                if tsymbol in nics:
                    continue

            if os.path.isfile("StockData/" + tsymbol + ".csv"):
                data_lookup[tsymbol] = pan.read_csv("StockData/" + tsymbol + ".csv", sep=',', header=0, index_col=0)

            if tsymbol in data_lookup:
                if verbose:
                    print("Loading[" + str(class_min[tclass]) + "%:" + str(class_max[tclass]) + "%, " +
                          "#" + str(k) + "]: " + tsymbol)
                df = data_lookup[tsymbol]
            else:
                if verbose:
                    print("Downloading[" + str(class_min[tclass]) + "%:" + str(class_max[tclass]) + "%, " +
                          "#" + str(k) + "]: " + tsymbol)
                param = {
                    'q': tsymbol,  # Stock symbol (ex: "AAPL")
                    'i': "86400",  # Interval size in seconds ("86400" = 1 day intervals)
                    'x': "NASD",   # Stock exchange symbol on which stock is traded (ex: "NASD")
                    'p': "5Y"      # Period (Ex: "1Y" = 1 year)
                }
                # get price data (return pandas dataframe)
                df = get_price_data(param)
                df.to_csv("StockData/" + tsymbol + ".csv")
                data_lookup[tsymbol] = deepcopy(df)

            tshape = df.iloc[:, 0:].values.shape

            if tshape[0] <= data_row_count:
                if verbose:
                    print("Data error: " + tsymbol + ", continuing to next symbol...")
                if track_classes:
                    with open("skips.csv", "a") as text_file:
                        text_file.write(tsymbol + "\n")
                continue

            if tshape[0] < 400:
                if verbose:
                    print("Not enough data for: " + tsymbol + ", continuing to next symbol...")
                if track_classes:
                    with open("skips.csv", "a") as text_file:
                        text_file.write(tsymbol + "\n")
                continue

            # Check for zeros
            if check_for_zeros:
                zero_flag = False
                for row in range(0, tshape[0]):
                    for column in range(0, tshape[1]):
                        v = df.iloc[:, column:].values[row][0]
                        if v <= 0:
                            zero_flag = True
                            break
                    if zero_flag:
                        break
                if zero_flag:
                    if verbose:
                        print("Zeros in: " + tsymbol + ", continuing to next symbol...")
                    if track_classes:
                        with open("skips.csv", "a") as text_file:
                            text_file.write(tsymbol + "\n")
                    continue

            sdfdata = sdf.retype(df)

            indicies = find_intervals(projection_range, data_row_count + projection_range, sdfdata,
                                      class_min[tclass], class_max[tclass])

            if len(indicies) > 0:
                random.shuffle(indicies)
                for i in range(0, len(indicies)):
                    if k < class_count[z] and batch_i < batch_count:
                        # Add data to the batch array
                        x_train[batch_i] = get_indicator_data_at_index(indicies[i] - projection_range,
                                                                       data_row_count, sdfdata,
                                                                       indicators, intervals)
                        y_train[batch_i][tclass] = 1
                        k += 1
                        batch_i += 1
                    else:
                        break
            elif track_classes:
                with open("nic-" + str(z) + ".csv", "a") as text_file:
                    text_file.write(tsymbol + "\n")

            del sdfdata
            del df

        del skips
        del nics

    return x_train, y_train


def random_normal_populate_training(n_classes, batch_count, data_column_count, data_row_count,
                                    projection_range, mu, sigma,
                                    check_for_zeros=True, track_classes=True, verbose=True):
    global data_lookup

    indicators = ['rsi', 'atr', 'wr', 'vr']
    intervals = ['2', '5', '10', '15', '20', '30', '60']

    x_train = np.zeros((batch_count, data_column_count, data_row_count))
    y_train = np.zeros((batch_count, n_classes), dtype=int)

    symbols = []
    # https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ&render=download
    with open('NASD.csv', newline='') as csvfile:
        r = csv.DictReader(csvfile)
        for row in r:
            symbols.append(row['Symbol'])

    class_max = [-50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50, 10000]
    class_min = [-10000, -50, -20, -15, -10, -5, -4, -3, -2, -1, -0.1, 0, 1, 2, 3, 4, 5, 10, 15, 20, 50]

    if not os.path.exists('StockData/'):
        os.makedirs('StockData/')

    skips = []  # Skipped due to data errors
    nics = {}

    if track_classes:
        if os.path.isfile("skips.csv"):
            with open('skips.csv', newline='\n') as csvfile:
                r = csv.DictReader(csvfile)
                for row in r:
                    skips.append(row['Symbols'])
        else:
            with open("skips.csv", "w") as text_file:
                text_file.write("""Symbols""" + "\n")

        for z in range(0, n_classes):
            nics[z] = []
            if os.path.isfile("nic-" + str(z) + ".csv"):
                with open("nic-" + str(z) + ".csv", newline='\n') as csvfile:
                    r = csv.DictReader(csvfile)
                    for row in r:
                        nics[z].append(row['Symbols'])
            else:
                with open("nic-" + str(z) + ".csv", "w") as text_file:
                    text_file.write("""Symbols""" + "\n")

    batch_i = 0

    while batch_i < batch_count:

        tsymbol = symbols[np.random.randint(0, len(symbols))]

        # Draw the target class from a normal distribution and clamp it to range.
        tclass = int(np.random.normal(mu, sigma))
        if tclass >= n_classes:
            tclass = n_classes - 1
        elif tclass < 0:
            tclass = 0

        # Check for stocks to skip
        if track_classes:
            if tsymbol in skips:
                continue
            if tsymbol in nics[tclass]:
                continue

        if tsymbol in data_lookup:
            if verbose:
                print("Loading: " + tsymbol)
            df = data_lookup[tsymbol]
        elif os.path.isfile("StockData/" + tsymbol + ".csv"):
            if verbose:
                print("Loading: " + tsymbol)
            data_lookup[tsymbol] = pan.read_csv("StockData/" + tsymbol + ".csv", sep=',', header=0, index_col=0)
            df = data_lookup[tsymbol]
        else:
            if verbose:
                print("Downloading: " + tsymbol)
            param = {
                'q': tsymbol,  # Stock symbol (ex: "AAPL")
                'i': "86400",  # Interval size in seconds ("86400" = 1 day intervals)
                'x': "NASD",   # Stock exchange symbol on which stock is traded (ex: "NASD")
                'p': "5Y"      # Period (Ex: "1Y" = 1 year)
            }
            # get price data (return pandas dataframe)
            df = get_price_data(param)
            df.to_csv("StockData/" + tsymbol + ".csv")
            data_lookup[tsymbol] = deepcopy(df)

        tshape = df.iloc[:, 0:].values.shape

        if tshape[0] <= data_row_count:
            if verbose:
                print("Data error: " + tsymbol + ", continuing to next symbol...")
            if track_classes:
                with open("skips.csv", "a") as text_file:
                    text_file.write(tsymbol + "\n")
            continue

        if tshape[0] < 400:
            if verbose:
                print("Not enough data for: " + tsymbol + ", continuing to next symbol...")
            if track_classes:
                with open("skips.csv", "a") as text_file:
                    text_file.write(tsymbol + "\n")
            continue

        # Check for zeros
        if check_for_zeros:
            zero_flag = False
            for row in range(0, tshape[0]):
                for column in range(0, tshape[1]):
                    v = df.iloc[:, column:].values[row][0]
                    if v <= 0:
                        zero_flag = True
                        break
                if zero_flag:
                    break
            if zero_flag:
                if verbose:
                    print("Zeros in: " + tsymbol + ", continuing to next symbol...")
                if track_classes:
                    with open("skips.csv", "a") as text_file:
                        text_file.write(tsymbol + "\n")
                continue

        sdfdata = sdf.retype(df)

        indicies = find_intervals(projection_range, data_row_count + projection_range, sdfdata,
                                  class_min[tclass], class_max[tclass])

        if len(indicies) > 0:
            i = np.random.randint(0, len(indicies))
            if batch_i < batch_count:
                # Add data to the batch array
                x_train[batch_i] = get_indicator_data_at_index(indicies[i] - projection_range,
                                                               data_row_count, sdfdata,
                                                               indicators, intervals)
                y_train[batch_i][tclass] = 1
                batch_i += 1
            else:
                break
        elif track_classes:
            with open("nic-" + str(tclass) + ".csv", "a") as text_file:
                text_file.write(tsymbol + "\n")

        del sdfdata
        del df

    del skips
    del nics

    return x_train, y_train
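A minimal sketch of how these generators might be invoked. The column count must equal len(indicators) * len(intervals), which is 4 * 7 = 28, and the 21-entry class_min/class_max tables fix n_classes at 21; the batch size, look-back window, and projection below are assumptions, and NASD.csv must sit alongside the script:

# Hypothetical driver for the homogeneous generator.
N_CLASSES = 21        # matches the class_min/class_max tables
DATA_COLUMNS = 4 * 7  # len(indicators) * len(intervals)
ROW_COUNT = 30        # look-back window, assumed
PROJECTION = 5        # days ahead, assumed

x_train, y_train = homogeneous_populate_training(
    N_CLASSES, 256, DATA_COLUMNS, ROW_COUNT, PROJECTION,
    check_for_zeros=True, track_classes=True, verbose=True)
print(x_train.shape, y_train.shape)  # (256, 28, 30) (256, 21)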
UTF-8
Python
false
false
24,109
py
28
DataPrep.py
7
0.427517
0.411216
0
596
38.454698
110
bettersleepearly/Programming-Beginner-2
14,766,097,597,640
5592b36b79250beb3b4b2372cdbb1467f1f42ac7
34200997a02dc3f6f733c297b48d47ad9566e3d8
/Shoppinproj.py
1ecdfaf10f80d3b716c38fb1ae3c97f46ec111c3
[]
no_license
https://github.com/bettersleepearly/Programming-Beginner-2
4396844be3de449017b132fa67042977d2364e19
0ef1c4b224716f51a4f47d40f5b5d08bc588d877
refs/heads/master
2022-10-14T15:30:26.608237
2020-06-09T15:15:14
2020-06-09T15:15:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
currency = 'USD'
price = {'CB1': 139, 'CB2': 149, 'CB3': 139, 'CB4': 139, 'CB5': 169, 'CB6': 139,
         'CB7': 139, 'CB8': 139, 'CB9': 139, 'CB10': 149, 'CB11': 134, 'CB12': 129,
         'LB1': 174, 'LB2': 174, 'LB3': 174, 'LB4': 199, 'LB5': 199, 'LB6': 199,
         'LB7': 189, 'LB8': 189, 'LB9': 179, 'LB10': 244, 'LB11': 244, 'LB12': 244}

cb1 = 'Dinghy Tiger:'
cb2 = 'Dinghy FG Watercolor:'
cb3 = 'Dinghy Crown Peak:'
cb4 = 'Dinghy BK:'
cb5 = 'Gordito Pantera:'
cb6 = 'Ditchlife:'
cb7 = 'Wrecktangle Lighthouse:'
cb8 = 'Tugboat Captain:'
cb9 = 'Tugboat Chill Cat:'
cb10 = 'Tugboat Midnight Snek:'
cb11 = 'Dinghy Arctic Fox:'
cb12 = 'Mighty Mite:'
lb1 = 'Super Chief Watercolor:'
lb2 = 'Ripper Humanoid:'
lb3 = 'Ripper Watercolor:'
lb4 = 'Drop Carve 40 Fox:'
lb5 = 'Drop Cat 33 Illuminacion:'
lb6 = 'Drop Cat 38 Seeker:'
lb7 = 'Pinner Night Moves:'
lb8 = 'Totem Blaze:'
lb9 = 'Chief Eyes:'
lb10 = 'Switchblade 38 Tropic:'
lb11 = 'Nine Two Five Horror:'
lb12 = 'Hollowtech Switchblade 36 Lizard:'

DIVIDER = '_' * 120  # long separator line

print("\n _____________")
print("|             |")
print("|  L A N D    |")
print("| Y A T C H Z |")
print("|_____________|")

print('\nWE EXPLORE THE WORLD ON SKATEBOARDS \nWe make vehicles for seeking out adventure, finding joy in self-expression and improving our interactions with the world around us.')
print('\nInterested in our products? Wanna dive in?')
question = input('\nIf yes, type Y. \nType N to exit.')

if question == "N":
    print(DIVIDER)
    print('\nWell then... Fuck off mate.')

if question == "Y":
    print(DIVIDER)
    print("\nBefore finding you a perfect skateboard, \nLet's create an account")
    username = input('\nMay I have your username:')
    print("\nOkay" + " " + username + ",")
    print("May I have your age? (since our branding and products are not suited for ages under 18):")
    age = int(input())
    if age < 18:
        print(DIVIDER)
        print("\nI am sorry that you can not continue" + " " + username)
        print("\nUnderage usage of our products and contents is illegal...\nSo fuck off kid!")
    if age >= 18:
        print(DIVIDER)
        print("\nRequirement reached,\nYour access is now granted")
        print("\nEnjoy shopping skateboards!")
        print("\nWe own a large variety of skateboards and longboards with different styles and uses.")
        print("\nOur 2 main lines are recommended the most,")
        boards = "\n(1) Cruiser boards\n(2) Longboards\nPick one (type a number):"
        print(boards)
        boardpick = str(input())
        action = ""  # initialised so the loops below never see an undefined name
        if boardpick == "1":
            print(DIVIDER)
            print("\nThe most fun and capable boards in our line-up, these boards are the best bang for your buck available today.")
            print('Whether it’s your first board or your tenth, there’s always room in your quiver for a good cruiser and you’ll quickly find it becoming your go-to in all sorts of situations.')
            print(DIVIDER)
            print('\n1.' + cb1 + " " + str(price.get('CB1')) + " " + currency + '\n(No Picture LMAO)')
            print('\n2.' + cb2 + " " + str(price.get('CB2')) + " " + currency + '\n(No Picture LMAO)')
            print('\n3.' + cb3 + " " + str(price.get('CB3')) + " " + currency + '\n(No Picture LMAO)')
            print('\n4.' + cb4 + " " + str(price.get('CB4')) + " " + currency + '\n(No Picture LMAO)')
            print('\n5.' + cb5 + " " + str(price.get('CB5')) + " " + currency + '\n(No Picture LMAO)')
            print('\n6.' + cb6 + " " + str(price.get('CB6')) + " " + currency + '\n(No Picture LMAO)')
            print('\n7.' + cb7 + " " + str(price.get('CB7')) + " " + currency + '\n(No Picture LMAO)')
            print('\n8.' + cb8 + " " + str(price.get('CB8')) + " " + currency + '\n(No Picture LMAO)')
            print('\n9.' + cb9 + " " + str(price.get('CB9')) + " " + currency + '\n(No Picture LMAO)')
            print('\n10.' + cb10 + " " + str(price.get('CB10')) + " " + currency + '\n(No Picture LMAO)')
            print('\n11.' + cb11 + " " + str(price.get('CB11')) + " " + currency + '\n(No Picture LMAO)')
            print('\n12.' + cb12 + " " + str(price.get('CB12')) + " " + currency + '\n(No Picture LMAO)')
            print('\nSo...Pick one (by typing "CB" + the number of the board):')
            action = str(input('To return to the menu,\nType "menu"\nType "mycart" to go to your cart and further check-out there.'))
            print(action)
            print(DIVIDER)
        if boardpick == "2":
            print(DIVIDER)
            print('Our Longboards are designed to get you out exploring your environment, no matter what kind of terrain you have surrounding you.\nThe boards in this category come in two deck styles; Top mounted or Drop-through.')
            print('\nTop mount boards give you tons of leverage over your trucks, giving you a deeper carving, surfy feel and a lively ride underfoot.')
            print('Drop-through boards are lower to the ground, making them driftier, more stable and blurring the lines between longboarding and freeriding.')
            print('\n1.' + lb1 + " " + str(price.get('LB1')) + " " + currency + '\n(No Picture LMAO)')
            print('\n2.' + lb2 + " " + str(price.get('LB2')) + " " + currency + '\n(No Picture LMAO)')
            print('\n3.' + lb3 + " " + str(price.get('LB3')) + " " + currency + '\n(No Picture LMAO)')
            print('\n4.' + lb4 + " " + str(price.get('LB4')) + " " + currency + '\n(No Picture LMAO)')
            print('\n5.' + lb5 + " " + str(price.get('LB5')) + " " + currency + '\n(No Picture LMAO)')
            print('\n6.' + lb6 + " " + str(price.get('LB6')) + " " + currency + '\n(No Picture LMAO)')
            print('\n7.' + lb7 + " " + str(price.get('LB7')) + " " + currency + '\n(No Picture LMAO)')
            print('\n8.' + lb8 + " " + str(price.get('LB8')) + " " + currency + '\n(No Picture LMAO)')
            print('\n9.' + lb9 + " " + str(price.get('LB9')) + " " + currency + '\n(No Picture LMAO)')
            print('\n10.' + lb10 + " " + str(price.get('LB10')) + " " + currency + '\n(No Picture LMAO)')
            print('\n11.' + lb11 + " " + str(price.get('LB11')) + " " + currency + '\n(No Picture LMAO)')
            print('\n12.' + lb12 + " " + str(price.get('LB12')) + " " + currency + '\n(No Picture LMAO)')
            print('\nPick one (by typing "LB" + the number of the board):')
            action = str(input('To return to the menu,\nType "menu"\nType "mycart" to go to your cart and further check-out there.'))
            print(action)
            print(DIVIDER)
        instore = True
        while action == "menu":
            instore = True
            print("O U R   B O A R D S")
            print(boards)
            # Re-prompt so the loop can actually exit (the original restated boardpick to no effect).
            action = str(input('Pick a board code (e.g. "CB1" or "LB1"):'))
            print(DIVIDER)
        # if action == "mycart":
        #     print(AHHA)
        if action in price:  # guard: price.get() on an unknown code would crash int()
            cart = [int(price.get(action))]
            visualcart = ("Your Cart:" + " " + str(cart) + " " + currency)
            print(visualcart)
            print(DIVIDER)
            instore = True
            while action:
                instore = True
                action = str(input('Anymore selection? my friend :)\nType here:'))
                print(visualcart)
UTF-8
Python
false
false
8,680
py
3
Shoppinproj.py
2
0.450542
0.422527
0
166
50.168675
231
junk13/fiercecroissant
4,939,212,400,068
b87b759aed94b2313d3bd88bf3d2e071301fb827
9a18fa277118715e33c68763a98f40b3df0a2448
/fiercecroissant.py
04d64756dc190e3efc7fd7a3fa2627ece451df34
[]
no_license
https://github.com/junk13/fiercecroissant
64bbe6831143978ccb756e4c93d812bcc1711798
26b88dd6f3068f40cc74119611b30a90542ce880
refs/heads/master
2020-04-08T16:21:16.296844
2018-11-08T20:42:57
2018-11-08T20:42:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python3
import requests, json, time, sys, os, re, configparser, base64
from pymongo import MongoClient
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

client = MongoClient('localhost:27017')
db = client.fc
coll_pastemetadata = client.fc.pastemetadata

paste_data = ""
save_path = os.getcwd() + '/pastes/'  # Where keyword matching pastes get saved
save_path_base64 = save_path + '/base64pastes/'
save_path_hex = save_path + '/hexpastes/'
save_path_binary = save_path + '/binarypastes/'
save_path_php = save_path + '/phppastes/'
save_path_img = save_path + '/imgpastes/'
save_path_ascii = save_path + '/asciipastes/'
save_path_ps = save_path + '/pspastes/'

# Make sure the output directories exist before any paste is saved
# (the original never created them, so every save would fail).
for _p in (save_path, save_path_base64, save_path_hex, save_path_binary,
           save_path_php, save_path_img, save_path_ascii, save_path_ps):
    os.makedirs(_p, exist_ok=True)

# Config file for token or key interactions.
config = configparser.ConfigParser()
config.read('config.ini')
if not config.has_section('main'):
    print("\nPlease ensure that your 'config.ini' exists and sets the appropriate values.\n")
    exit(1)

hip_token = config.get('main', 'hip_token')
hip_room = config.get('main', 'hip_room')


def scrapebin():

    def requests_retry_session(retries=10, backoff_factor=0.3, status_forcelist=(500, 502, 504),
                               session=None, params=None):
        session = session or requests.Session()
        retry = Retry(total=retries, read=retries, connect=retries,
                      backoff_factor=backoff_factor, status_forcelist=status_forcelist)
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('https://', adapter)
        return session

    def save_paste(path, data):
        with open(path, 'w', encoding='utf-8') as file:
            file.write(data)
        return file.closed

    def save_metadata(paste, encodingtype):
        pastemetadata_dict = {'date': [], 'key': [], 'size': [], 'expire': [], 'syntax': [],
                              'user': [], 'encodingtype': []}
        pastemetadata_dict.update({'date': paste['date'], 'key': paste['key'], 'size': paste['size'],
                                   'expire': paste['expire'], 'syntax': paste['syntax'],
                                   'user': paste['user'], 'encodingtype': encodingtype})
        return pastemetadata_dict

    def hipchatpost():
        # Alerts a HipChat room about a new paste.
        headers = {'Content-Type': 'application/json'}
        card = {
            "style": "link",
            "url": paste_url,
            "id": "fee4d9a3-685d-4cbd-abaa-c8850d9b1960",
            "title": "Pastebin Hit",
            "description": {
                "format": "html",
                "value": "<b>New Paste Seen:</b>" + paste_url + " Encoded as:" + encodingtype
            },
            "icon": {
                "url": "https://pastebin.com/favicon.ico"
            },
            "date": 1443057955792
        }
        data_json = {'message': '<b>New Paste<b>', 'card': card, 'message_format': 'html'}
        params = {'auth_token': hip_token}
        try:
            r = requests.post('https://api.hipchat.com/v2/room/' + hip_room + '/notification',
                              data=json.dumps(data_json), headers=headers, params=params)
        except:
            pass

    while True:
        hits = 0
        r = requests_retry_session().get('https://scrape.pastebin.com/api_scraping.php', params={'limit': 100})
        recent_items = None
        try:
            recent_items = r.json()
        except json.decoder.JSONDecodeError as e:
            print(('Exception raised decoding JSON: {}').format(e))
            continue

        for i, paste in enumerate(recent_items):
            pb_raw_url = 'https://pastebin.com/raw/' + paste['key']
            paste_data = requests.get(pb_raw_url).text
            paste_lang = paste['syntax']
            paste_size = paste['size']
            paste_url = paste['full_url']

            stringmatch = re.search(r'(A){20}', paste_data)     # Searching for 20 'A's in a row.
            stringmatch_76 = re.search(r'(A){76}', paste_data)  # Searching for 76 'A's in a row.
            nonwordmatch = re.search(r'\w{200,}', paste_data)   # Searching for 200 characters in a row to get non-words.
            base64match = re.search(r'\A(TV(oA|pB|pQ|qQ|qA|ro|pA))', paste_data)  # Searches the start of the paste for Base64 encoding structure for an MZ executable.
            base64reversematch = re.search(r'((Ao|Bp|Qp|Qq|Aq|or|Ap)VT)\Z', paste_data)  # Searches the end of the paste for reversed Base64 encoding structure for an MZ executable.
            binarymatch = re.search(r'(0|1){200,}', paste_data)  # Searches for 200 0's or 1's in a row.
            hexmatch = re.search(r'(\\x\w\w){100,}', paste_data)  # Regex for hex formatted as "\\xDC", "\\x02", "\\xC4"
            hexmatch2 = re.search(r'[2-9A-F]{200,}', paste_data)  # Regex for Hexadecimal encoding.
            hexmatch3 = re.search(r'([0-9A-F ][0-9A-F ][0-9A-F ][0-9A-F ][0-9A-F ]){150,}', paste_data)  # Regex for hex formatted as "4D ", "5A ", "00 " in groups of at least 150.
            phpmatch = re.search(r'\A(<\?php)', paste_data)      # Searches the start of a paste for php structure.
            imgmatch = re.search(r'\A(data:image)', paste_data)  # Searches the start of a paste for data:image structure.
            asciimatch = re.search(r'\A(77 90 144 0 3 0 0 0)', paste_data)  # Searches the start of a paste for '77 90 144 0 3 0 0 0' to filter ASCII.
            powershellmatch = re.search(r'powershell', paste_data)

            if ((((nonwordmatch or stringmatch) or (stringmatch_76 and (base64match or base64reversematch)) or hexmatch3)
                 and int(paste_size) > 40000)
                    or (powershellmatch and int(paste_size) < 10000)) \
                    and paste_lang == "text" and coll_pastemetadata.find_one({'key': paste['key']}) is None:

                if (binarymatch and paste_data.isnumeric()):
                    filename = save_path_binary + paste['key']
                    encodingtype = 'binary'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif (base64match or base64reversematch):
                    filename = save_path_base64 + paste['key']
                    encodingtype = 'base64'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif asciimatch:
                    filename = save_path_ascii + paste['key']
                    encodingtype = "ASCII"
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif (hexmatch or hexmatch2 or hexmatch3):
                    filename = save_path_hex + paste['key']
                    encodingtype = 'hexadecimal'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif phpmatch:
                    filename = save_path_php + paste['key']
                    encodingtype = 'php'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif imgmatch:
                    filename = save_path_img + paste['key']
                    encodingtype = 'img'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                elif powershellmatch:
                    filename = save_path_ps + paste['key']
                    encodingtype = 'powershell'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()
                else:
                    filename = save_path + paste['key']
                    encodingtype = 'other'
                    save_paste(filename, paste_data)
                    metadata = save_metadata(paste, encodingtype)
                    coll_pastemetadata.insert_one(metadata)
                    hipchatpost()

        time.sleep(60)


if __name__ == "__main__":
    while True:
        scrapebin()
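As a sanity check on the MZ-header heuristics: base64 of a buffer starting with the DOS magic bytes "MZ" begins with "TV", which the first alternation targets. A small sketch, where the fake header bytes are an assumption, just enough to trigger the pattern:

import base64
import re

fake_mz = b"MZ\x90\x00\x03\x00\x00\x00" + b"\x00" * 32  # minimal DOS-header-like bytes
encoded = base64.b64encode(fake_mz).decode()
print(encoded[:4])  # 'TVqQ'
print(bool(re.search(r'\A(TV(oA|pB|pQ|qQ|qA|ro|pA))', encoded)))  # True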
UTF-8
Python
false
false
8,511
py
3
fiercecroissant.py
1
0.569968
0.548584
0.00047
162
51.537037
288
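The fiercecroissant.py scraper above wires urllib3's Retry into a requests Session so transient 5xx errors from the scraping endpoint are retried with exponential backoff. A minimal, self-contained sketch of that pattern (the URL in the usage comment is a placeholder):

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

def retry_session(retries=10, backoff_factor=0.3, status_forcelist=(500, 502, 504)):
    # Each failed attempt sleeps roughly backoff_factor * (2 ** (attempt - 1)) seconds
    session = requests.Session()
    retry = Retry(total=retries, read=retries, connect=retries,
                  backoff_factor=backoff_factor, status_forcelist=status_forcelist)
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session

# r = retry_session().get('https://example.com/api')  # placeholder URL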
fprimex/lad
4,355,096,853,786
1d394c3a901e45ace142d60e8ab01314f1299677
a265442582d35030a22a207930bc849c5bd73c9e
/NotebookPage/VarsPage.py
92c8f7f5b0d28430bf44c6fdcb8e57c3c9ecaef4
[ "Apache-2.0" ]
permissive
https://github.com/fprimex/lad
48ebff8f83a82b7ec888c26234466c7f8026ac95
493e998eae351252cf78736b803ddc88de6abead
refs/heads/main
2023-06-09T01:22:45.487011
2018-08-26T04:22:35
2018-08-26T04:22:35
146,149,601
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import wx import Globals from ListEditorCtrl import ListEditorCtrl class VarsPage(wx.Panel): def __init__(self, parent, id=-1): wx.Panel.__init__(self, parent, -1) cols = [u"var", u"val / exp"] listdata = {} i = 0 for var in Globals.G.vars.keys(): listdata[i] = (var, Globals.G.var_exps[var]) i += 1 self.vars_ctrl = ExpListCtrl(self, -1, cols, listdata, style=wx.LC_REPORT | wx.BORDER_NONE) page_sizer = wx.BoxSizer(wx.VERTICAL) page_sizer.Add(self.vars_ctrl, 1, wx.EXPAND, 0) self.SetSizer(page_sizer) Globals.canvas.on_node_create_funcs.append(self.vars_ctrl.RefreshList) Globals.canvas.on_edge_create_funcs.append(self.vars_ctrl.RefreshList) Globals.canvas.on_node_delete_funcs.append(self.vars_ctrl.RefreshList) Globals.canvas.on_edge_delete_funcs.append(self.vars_ctrl.RefreshList) Globals.canvas.on_drag_end_funcs.append(self.vars_ctrl.RefreshList) def Reinit(self): listdata = {} i = 0 for var in Globals.G.vars.keys(): listdata[i] = (var, Globals.G.var_exps[var]) i += 1 self.vars_ctrl.listctrldata = listdata self.vars_ctrl.RefreshList() class ExpListCtrl(ListEditorCtrl): def __init__(self, parent, ID, headings, listdata, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0): ListEditorCtrl.__init__(self, parent, ID, headings, listdata, pos, size, style) def GetRowValues(self, row): G = Globals.G var = self.listctrldata[row][0] exp = self.listctrldata[row][1] try: value = eval(exp) except: value = u"ERROR" Globals.G.var_exps[var] = exp Globals.G.vars[var] = value return (var, repr(value)) def GetEditValue(self, row, col): if col == 1: return self.listctrldata[row][1] else: return self.GetItem(row, col).GetText() def SetValue(self, row, col, text): # save exp text back into the expression dict if col == 0: del Globals.G.vars[self.listctrldata[row][0]] del Globals.G.var_exps[self.listctrldata[row][0]] self.listctrldata[row] = (text, self.listctrldata[row][1]) else: self.listctrldata[row] = (self.listctrldata[row][0], text) def PreDelete(self, row): var = self.GetItem(row, 0).GetText() del Globals.G.vars[var] del Globals.G.var_exps[var] def PostInsert(self, row): var = self.listctrldata[row][0] Globals.G.vars[var] = None Globals.G.var_exps[var] = None
UTF-8
Python
false
false
2,717
py
17
VarsPage.py
15
0.589253
0.581524
0
76
34.75
99
michealbradymahoney/CP1404-2019SP2
11,982,958,756,874
9873ffb1c7851050c46805bfef2b2a308b08c0da
13311af3281150ffbdc927ffe7e6b547a1bb0899
/Practicals/prac_02/password_checker.py
4376c0efbaf9488b272f10db62e68dbc7bdcbc12
[]
no_license
https://github.com/michealbradymahoney/CP1404-2019SP2
263db4593d8bd5a137e9ba13a0330c8cb566f82a
02a7c985e0eb4e0d76add18f7541eaee7eeba327
refs/heads/master
2020-06-30T08:42:39.520236
2019-08-31T01:55:13
2019-08-31T01:55:13
200,781,949
0
0
null
false
2019-10-08T07:25:35
2019-08-06T05:22:40
2019-08-31T01:55:17
2019-10-08T07:22:43
2,446
0
0
2
Python
false
false
""" CP1404/CP5632 - Practical Password checker "skeleton" code to help you get started """ # Write a program that asks for and validates a person's password. # The program is not for comparing a password to a known password, but validating the 'strength' of a new password, # like you see on websites: enter your password and then it tells you if it's valid (matches the required pattern) # and re-prompts if it's not. # All passwords must contain at least one each of: number, lowercase and uppercase character. # # The starter code uses constants (variables at the top of the code, named in ALL_CAPS) to store: # # a. the minimum and maximum length of the password # b. whether or not a special character (not alphabetical or numerical) is required # # Remember when a program has constants, you should use them everywhere you can so that if you change them at the top, # this change affects the whole program as expected. # E.g. if you changed the minimum length to 5, the program should print 5 and should check to make sure the # password is >= 5 characters long. MIN_LENGTH = 5 MAX_LENGTH = 15 SPECIAL_CHARS_REQUIRED = False SPECIAL_CHARACTERS = "!@#$%^&*()_-=+`~,./'[]<>?{}|\\" def main(): """Program to get and check a user's password.""" print("Please enter a valid password") print("Your password must be between", MIN_LENGTH, "and", MAX_LENGTH, "characters, and contain:") print("\t1 or more uppercase characters") print("\t1 or more lowercase characters") print("\t1 or more numbers") if SPECIAL_CHARS_REQUIRED: print("\tand 1 or more special characters: ", SPECIAL_CHARACTERS) password = input("> ") while not is_valid_password(password): print("Invalid password!") password = input("> ") print("Your {}-character password is valid: {}".format(len(password), password)) def is_valid_password(password): """Determine if the provided password is valid.""" if len(password) < MIN_LENGTH or len(password) > MAX_LENGTH: return False count_lower = 0 count_upper = 0 count_digit = 0 count_special = 0 for char in password: if char.isdigit(): count_digit += 1 elif char.islower(): count_lower += 1 elif char.isupper(): count_upper += 1 elif char in SPECIAL_CHARACTERS: count_special += 1 if count_lower == 0 or count_upper == 0 or count_digit == 0: return False if SPECIAL_CHARS_REQUIRED: if count_special == 0: return False # if we get here (without returning False), then the password must be valid return True main()
UTF-8
Python
false
false
2,726
py
20
password_checker.py
18
0.652238
0.641233
0
72
36.861111
118
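The per-class counting loop in password_checker.py above can be expressed more compactly with any() and generator expressions. A behaviourally equivalent sketch (omitting the optional special-character rule for brevity):

def is_valid_password_compact(password, min_len=5, max_len=15):
    # A password is valid when its length is in range and it contains
    # at least one digit, one lowercase and one uppercase character
    return (min_len <= len(password) <= max_len
            and any(c.isdigit() for c in password)
            and any(c.islower() for c in password)
            and any(c.isupper() for c in password))

assert is_valid_password_compact("Abc12")      # all three character classes present
assert not is_valid_password_compact("abc12")  # rejected: no uppercase character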
lanzorg/winup
4,226,247,843,373
c4514a58b11ff050792254cccfb14d44646f8bde
beae4c7e27bc10f8f53f7ece6c33e13359936d3b
/winup/helpers/downloaders.py
028920ed24cc9bbd86fbfac18c70ffc2b705f59e
[]
no_license
https://github.com/lanzorg/winup
771537d4de6efcc76e0effa059154b2d7871f667
e2545e10f5caa5d20c24fb756cb6819b345baf9f
refs/heads/master
2020-05-02T12:14:31.258250
2019-06-11T08:30:41
2019-06-11T08:30:41
177,953,474
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os import tempfile import requests import rfc6266_parser def download_from_url(url, cookies=None): """Download a file from the direct download url.""" response = requests.get(url=url, cookies=cookies, allow_redirects=True, stream=True) file_path = os.path.join( tempfile.mkdtemp(), rfc6266_parser.parse_requests_response(response).filename_unsafe, ) with open(file_path, "wb") as f: for chunk in response.iter_content(chunk_size=1024): if chunk: f.write(chunk) f.flush() return file_path
UTF-8
Python
false
false
593
py
13
downloaders.py
11
0.642496
0.62226
0
23
24.782609
88
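downloaders.py above depends on the third-party rfc6266_parser package to extract a filename from the Content-Disposition header. When that dependency is unavailable, a rough fallback is to derive the name from the URL path — a sketch only, not equivalent to full RFC 6266 parsing:

import os
import tempfile
from urllib.parse import urlparse

import requests

def download_simple(url):
    response = requests.get(url, stream=True)
    # Naive fallback: last path segment of the URL, ignoring Content-Disposition
    name = os.path.basename(urlparse(url).path) or "download.bin"
    path = os.path.join(tempfile.mkdtemp(), name)
    with open(path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    return path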
valexby/text-classification-api
2,980,707,338,330
dfd78c37f525c9cd28cf1dd87489ab7536e0cfc7
c6e73a840037d831b95af3687d592536dccf6dbb
/core/classifier.py
24e58c9562bea9ce33064cac7ea3768a0cdb4298
[]
no_license
https://github.com/valexby/text-classification-api
c3c4e18c56b8ef1303f542048f2c590b6cd59f1e
b75d49a7d8f7c20a2c92c1115cf0d6762b23649a
refs/heads/master
2020-04-29T22:13:49.067198
2019-03-20T18:23:48
2019-03-20T18:23:48
176,439,462
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pickle from nltk import word_tokenize from nltk.stem.snowball import SnowballStemmer from sklearn.naive_bayes import MultinomialNB from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import CountVectorizer from sklearn.pipeline import Pipeline class Stemmer: def __init__(self): self.stemmer = SnowballStemmer('english').stem def __call__(self, text): return [self.stemmer(i) for i in word_tokenize(text)] class Classifier: def __init__(self, model=None): self.model = model def load(self, model_path): with open(model_path, 'rb') as model_file: self.model = pickle.load(model_file) self.model.steps[0][1].tokenizer = Stemmer() def dump(self, model_path): self.model.steps[0][1].tokenizer = None with open(model_path, 'wb') as model_file: pickle.dump(self.model, model_file) def predict(self, text): predicted = self.model.predict([text]) return int(predicted[0])
UTF-8
Python
false
false
1,044
py
8
classifier.py
4
0.676245
0.671456
0
33
30.636364
61
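classifier.py above nulls out the pipeline's tokenizer before pickling and reattaches a fresh Stemmer after loading — presumably because the callable tokenizer does not round-trip through pickle reliably across environments. Expected usage looks roughly like this (the model path is hypothetical):

clf = Classifier()
clf.load("model.pkl")                    # hypothetical pickled pipeline path
label = clf.predict("some document text")
print(label)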
minato1203/P8test1
266,287,972,815
42dfcd123f38a1189026febd63428f8ebf8485c1
21640cbd34a2f338fe51a79eb38230d4f2cfa010
/b.py
3ae7910eeb3065749fcba27038776875a0014262
[]
no_license
https://github.com/minato1203/P8test1
5d32e5bd8ed3e588794f9e89124e1484eca92d9e
a3b502623ea72209cc5c2c746dafb8873ac473ef
refs/heads/master
2023-04-09T07:42:05.330646
2021-04-28T11:05:45
2021-04-28T11:05:45
362,369,274
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
print("this is only a test text")
UTF-8
Python
false
false
33
py
1
b.py
1
0.727273
0.727273
0
1
33
33
Hoon94/Algorithm
1,855,425,893,552
4c9ebdf11ceab18811cb4bef019f6c7ab36a772a
73bb9d0d50b96b3d7ee48e2d97b1d8128a5f2b1e
/Leetcode/14 Longest Common Prefix.py
9158836808691acfe03b97c39c8efcb0801cabe9
[]
no_license
https://github.com/Hoon94/Algorithm
a0ef211d72a2b78e08249501d197875065392084
6f6969214bbb6bacd165313b6d8c0feb1caa8963
refs/heads/master
2023-05-11T13:12:11.585285
2023-05-08T14:38:47
2023-05-08T14:38:47
244,936,260
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Solution: def longestCommonPrefix(self, strs: list) -> str: """[summary] Args: strs (List[str]): words in a list Returns: str: return longest common prefix """ result = '' strs = sorted(strs, key=lambda x: len(x)) short = strs[0] if len(strs) > 0 else '' for i in range(len(short)): for word in strs: if short != word[:len(short)]: short = short[:-1] break else: result = short if len(result) > 0: break return result
UTF-8
Python
false
false
655
py
387
14 Longest Common Prefix.py
386
0.439695
0.433588
0
28
22.392857
53
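An alternative to the shrink-the-shortest-string approach in the solution above is to walk the character columns of all words in lockstep with zip(); a sketch:

def longest_common_prefix(strs):
    prefix = ""
    for chars in zip(*strs):          # zip stops at the shortest word
        if len(set(chars)) != 1:      # this column disagrees, so the prefix ends here
            break
        prefix += chars[0]
    return prefix

print(longest_common_prefix(["flower", "flow", "flight"]))  # "fl"
print(longest_common_prefix([]))                            # ""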
aamini/chemprop
9,844,065,091,729
28cc1e5981291e2bac08e7771584ff96ff4f8d47
7fec152e2f81c8ce35bed6f357937b2d4fd1ff6c
/scripts/create_train_curves.py
55d9a9b4e7380400670f87e969d4810344ef8e37
[ "MIT" ]
permissive
https://github.com/aamini/chemprop
072d712438dc5b3ba554f2734c03cfc4ae64834a
a7a137a09589474a5c5a83f75fbddbddfb877dc8
refs/heads/confidence-evidential
2023-05-23T20:59:52.572809
2021-07-29T16:16:26
2021-07-29T16:16:26
388,299,389
85
16
MIT
false
2021-07-29T16:16:27
2021-07-22T02:05:01
2021-07-29T15:16:18
2021-07-29T16:16:26
292,586
13
1
0
Python
false
false
"""Create train curve from log file Call signatures used: python scripts/create_train_curves.py --log-dir submission_results/gnn/qm9/evidence/ # Note : for this case, it's worth rescaling the x axis python scripts/create_train_curves.py --log-dir submission_results_atomsitic_multi/gnn/qm9/evidence/ --verbose-extension fold_0/model_0/verbose.log """ import matplotlib.pyplot as plt import seaborn as sns import numpy as np import os import argparse import re import pandas as pd def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--log-file", help="name of log file") parser.add_argument("--log-dir", help="name of log dir (eg has diff trials)") parser.add_argument("--verbose-extension", help="path from the trial directories to the log", default="verbose.log") #fold_0/model_0/verbose.log parser.add_argument("--out-name", help="Output name", default="temp.png") #fold_0/model_0/verbose.log return parser.parse_args() def get_val_losses(log_file : str): """ Extract the validation epochs from the log file""" losses = [] ctr = 0 epoch_re = r"Validation mae = (\d+\.\d+) *\nEpoch (\d+)" lines = open(log_file, "r").readlines() for index, line in enumerate(lines[:-1]): ctr += 1 search_str = "".join([line,lines[index+1]]) examples = re.findall(epoch_re, search_str) if len(examples) > 0: loss, epoch = examples[0] losses.append(float(loss)) return losses if __name__=="__main__": args = get_args() log_dir = args.log_dir log_file= args.log_file if log_dir: trial_files = [os.path.join(log_dir, i) for i in os.listdir(log_dir)] epoch_loss_list = [] for log_file in [f"{j}/{args.verbose_extension}" for j in trial_files]: if os.path.isfile(log_file): print(log_file) epoch_losses = get_val_losses(log_file) for index, loss in enumerate(epoch_losses): loss_entry = {"epoch" : index, "loss" : loss} epoch_loss_list.append(loss_entry) df = pd.DataFrame(epoch_loss_list) g = sns.lineplot(data =df, x="epoch", y="loss") #plt.ylim([0,0.5]) plt.savefig(f"{args.out_name}") else: epoch_losses = get_val_losses(log_file)
UTF-8
Python
false
false
2,463
py
65
create_train_curves.py
46
0.590337
0.583435
0
74
32.202703
147
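The two-line regular expression in create_train_curves.py above pairs each validation metric with the epoch number printed on the following log line. A small check of what it captures:

import re

log_text = "Validation mae = 0.1234 \nEpoch 7"
epoch_re = r"Validation mae = (\d+\.\d+) *\nEpoch (\d+)"
print(re.findall(epoch_re, log_text))  # [('0.1234', '7')]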
NickNganga/pyhtontake2
17,892,833,760,757
f5c41d06bb1137f0d1aac5b0f1dbedc9604cc91b
c653a1780fd09621bc543a09043e20b172445cce
/task3.py
b3732f92b2f2c6fcba84b9e1af2e3909c2c94ba4
[]
no_license
https://github.com/NickNganga/pyhtontake2
c5f21d4ccdc8375b24d1256a841cbac02a9a35c9
8ab3e25fcd596668f097466e8abd327586b65e71
refs/heads/master
2020-07-29T13:43:11.857697
2019-09-21T19:03:29
2019-09-21T19:03:29
209,826,409
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def list_ends(a_list):
    return (a_list[0], a_list[-1])

# Number of elements
num = int(input("Enter number of elements : "))

# The line below reads the inputs from the user using the map() function
put = list(map(int, input("\nEnter the numbers : ").strip().split()))[:num]

# The line below calls the function created above
put1 = list_ends(put)

# Outputs the original list
print("\nList is - ", put)

# Outputs the values at indices 0 and -1 (the first and last elements)
print("\nNew List is - ", put1)
UTF-8
Python
false
false
547
py
8
task3.py
7
0.665448
0.650823
0
17
31.235294
74
nimble-robotics/NR_pdf_utils
8,847,632,648,903
3cd1b38bc5678ca7783948073e99137351069d99
525ac244cc9c57a113686dcbf0816c06b9c7f026
/application.py
10815c22c65dce9b17a7f0bf25e9fa5985802416
[]
no_license
https://github.com/nimble-robotics/NR_pdf_utils
c9a2f574f976db8ca20d444c703dfb8436bd14c7
d7796aa3088bb598c1dfb8f58f27f3804a216340
refs/heads/master
2021-01-09T01:56:59.067583
2020-03-11T19:00:30
2020-03-11T19:00:30
242,209,366
0
0
null
true
2020-03-11T19:00:31
2020-02-21T18:53:46
2020-03-11T18:47:09
2020-03-11T19:00:30
120
0
0
0
Python
false
false
from flask import Flask,request,render_template # from utils import upload_to_aws,bucket_name from healthcheck import HealthCheck import os application = app = Flask(__name__,template_folder='templates') App_path = os.path.dirname(os.path.abspath(__file__)) @app.route('/home') def home(): return render_template('index.html') def app_status(): """ added health check """ return True,"App is up and running" health = HealthCheck(app,"/healthcheck") health.add_check(app_status) if __name__ == '__main__': app.run(debug = True, host='0.0.0.0', port= 5000 )
UTF-8
Python
false
false
591
py
8
application.py
2
0.673435
0.659898
0
26
21.769231
63
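Once the Flask app above is running, the registered healthcheck endpoint can be probed over HTTP; the healthcheck package responds with a JSON body summarising the registered checks (the exact fields depend on the package version, so treat this as a sketch):

import requests

r = requests.get("http://localhost:5000/healthcheck")
print(r.status_code)  # 200 while app_status() returns True
print(r.text)         # JSON summary of the registered checks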
jgraniero52/lab-4
19,507,741,477,552
047a86ff29f23855d0163b2fc3275a5467140a9d
13bff9eb83069bb94456449b588571b3de08b375
/searchEngine.py
d339f4911cb61d57d8ef82541c8bccea4136f994
[]
no_license
https://github.com/jgraniero52/lab-4
a379c3dcead1c3944805241ea03f86480d4066d0
e95e86916ddc1080db1ea4ff8bad68c26d6cf344
refs/heads/master
2021-05-28T02:25:41.088980
2012-10-07T22:34:00
2012-10-07T22:34:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sqlite3 import pickle import os from portStemmer import PorterStemmer from makeBigDict import scanCleanDir class Searcher: def __init__(self): self.stemmer = PorterStemmer() try: f = open(os.getcwd()+"/data/tokensDict.p", "r") self.tokens = pickle.load(f) except: print "Pickle file not found" print "Creating the Dirctionary" self.tokens = scanCleanDir() f = open(os.getcwd()+"/data/tokensDict.p", "w") pickle.dump(self.tokens, f) def dbQuery(self, query, args = ()): conn = sqlite3.connect('/Users/kristofer/comp_490/2lab/data/cache.db') db = conn.cursor() #args should be a tuple of the arguments in the query db.execute(query, args) rows = db.fetchall() conn.close() return rows def singleToken(self): print word = raw_input("Enter your one word query: ") token = word.lower() token = self.stemmer.stem(token, 0, len(token) - 1) try: wordDict = self.tokens[token] except: print word, "does not seem to exist in our files. Please try a different word" print return occurenceTotal = 0 highestFreq = {'freq': 0, 'docs':[]} i = 1 for doc in wordDict.keys(): freq = len(wordDict[doc]) occurenceTotal += freq linksQuery = """ SELECT webPage.linkText, item.itemName FROM ( SElECT itemToWebPage.webPageId, itemToWebPage.itemId FROM itemToWebPage WHERE webPageId = ?) AS linkItem JOIN item ON item.itemId = linkItem.itemId JOIN webPage ON webPage.webPageId = linkItem.webPageId; """ linksRow = self.dbQuery(linksQuery, (doc,)) print print i,"\t",linksRow[0][0] print "\t item: ",linksRow[0][1] print "\t occured ",freq,"times" i += 1 if freq > highestFreq['freq']: highestFreq['freq'] = freq highestFreq['docs'] = [linksRow[0][0]] elif freq == highestFreq['freq']: highestFreq['docs'].append(doc) print print "Total occurence of", word, "is", occurenceTotal, "times" print "Highest frequency: ", highestFreq['freq'], " times in: ", for i in range(len(highestFreq['docs'])): if i > 0: print "and" print highestFreq['docs'][i] print def orQuery(self): print word1 = raw_input("Enter the first word of your query: ") word2 = raw_input("Enter the second word of your query: ") token1 = word1.lower() token1 = self.stemmer.stem(token1, 0, len(token1) - 1) token2 = word2.lower() token2 = self.stemmer.stem(token2, 0, len(token2) - 1) try: docs = self.tokens[token1].keys() except: print word1, "does not seem to exist in our files. Please try a different word" print return try: docs2 = self.tokens[token2].keys() except: print word2, "does not seem to exist in our files. Please try a different word" print return #Perform a logical or on the elements of both lists. #Storing them in keys for doc in docs2: if doc not in docs: docs.append(doc) occurenceTotal = 0 i = 1 highestFreq = {'freq': 0, 'docs':[]} for doc in docs: freq1 = 0 freq2 = 0 try: freq1 = len(self.tokens[token1][doc]) except: None try: freq2 = len(self.tokens[token2][doc]) except: None freq = freq1 + freq2 occurenceTotal += freq linksQuery = """ SELECT webPage.linkText, item.itemName FROM ( SElECT itemToWebPage.webPageId, itemToWebPage.itemId FROM itemToWebPage WHERE webPageId = ?) 
AS linkItem JOIN item ON item.itemId = linkItem.itemId JOIN webPage ON webPage.webPageId = linkItem.webPageId; """ linksRow = self.dbQuery(linksQuery, (doc,)) print print i,"\t",linksRow[0][0] print "\t item: ",linksRow[0][1] print "\t occured ",freq,"times" i += 1 if freq > highestFreq['freq']: highestFreq['freq'] = freq highestFreq['docs'] = [linksRow[0][0]] elif freq == highestFreq['freq']: highestFreq['docs'].append(doc) print print "Total occurence of", word1, "or", word2, "is", occurenceTotal, "times" print "Highest frequency: ", highestFreq['freq'], " times in: ", for i in range(len(highestFreq['docs'])): if i > 0: print "and" print highestFreq['docs'][i] print def andQuery(self): print word1 = raw_input("Enter the first word of your query: ") word2 = raw_input("Enter the second word of your query: ") token1 = word1.lower() token1 = self.stemmer.stem(token1, 0, len(token1) - 1) token2 = word2.lower() token2 = self.stemmer.stem(token2, 0, len(token2) - 1) #Get the keys from both lists docs = [] try: docs1 = self.tokens[token1].keys() except: print word1, "does not seem to exist in our files. Please try a different word" print return try: docs2 = self.tokens[token2].keys() except: print word2, "does not seem to exist in our files. Please try a different word" print return #Perform a logical and on the elements of both lists. #Storing them in keys for doc in docs1: if doc in docs2: docs.append(doc) occurenceTotal = 0 i = 1 highestFreq = {'freq': 0, 'docs':[]} for doc in docs: freq1 = 0 freq2 = 0 try: freq1 = len(self.tokens[token1][doc]) except: None try: freq2 = len(self.tokens[token2][doc]) except: None freq = freq1 + freq2 occurenceTotal += freq linksQuery = """ SELECT webPage.linkText, item.itemName FROM ( SElECT itemToWebPage.webPageId, itemToWebPage.itemId FROM itemToWebPage WHERE webPageId = ?) AS linkItem JOIN item ON item.itemId = linkItem.itemId JOIN webPage ON webPage.webPageId = linkItem.webPageId; """ linksRow = self.dbQuery(linksQuery, (doc,)) print print i,"\t",linksRow[0][0] print "\t item: ",linksRow[0][1] print "\t occured ",freq,"times" i += 1 if freq > highestFreq['freq']: highestFreq['freq'] = freq highestFreq['docs'] = [linksRow[0][0]] elif freq == highestFreq['freq']: highestFreq['docs'].append(doc) print print "Total occurence of", word1, "and", word2, "is", occurenceTotal, "times" print "Highest frequency: ", highestFreq['freq'], " times in: ", for i in range(len(highestFreq['docs'])): if i > 0: print "and" print highestFreq['docs'][i] print def phraseQuery(self): print phrase = raw_input("Enter a two word phrase: ") while len(phrase.split(' ')) != 2: phrase = raw_input("Make sure your phrase is two words (e.g. 'hello goodbye'): ") words = phrase.split(' ') word1 = words[0] word2 = words[1] token1 = word1.lower() token1 = self.stemmer.stem(token1, 0, len(token1) - 1) token2 = word2.lower() token2 = self.stemmer.stem(token2, 0, len(token2) - 1) #Get the keys from both lists docs = [] try: docs1 = self.tokens[token1].keys() except: print word1, "does not seem to exist in our files. Please try a different word" print return try: docs2 = self.tokens[token2].keys() except: print word2, "does not seem to exist in our files. Please try a different word" print return #Perform a logical and on the elements of both lists. 
#Storing them in keys phraseDict = {} #Check which documents have both words for doc in docs1: if doc in docs2: doc1Pos = self.tokens[token1][doc] doc2Pos = self.tokens[token2][doc] #check which documents have the phrase in the correct order freq = 0 for pos1 in doc1Pos: for pos2 in doc2Pos: if pos2 == pos1 + 1: freq += 1 if freq > 0: phraseDict[doc] = freq occurenceTotal = 0 i = 1 highestFreq = {'freq': 0, 'docs':[]} for doc in phraseDict.keys(): freq = phraseDict[doc] occurenceTotal += freq linksQuery = """ SELECT webPage.linkText, item.itemName FROM ( SElECT itemToWebPage.webPageId, itemToWebPage.itemId FROM itemToWebPage WHERE webPageId = ?) AS linkItem JOIN item ON item.itemId = linkItem.itemId JOIN webPage ON webPage.webPageId = linkItem.webPageId; """ linksRow = self.dbQuery(linksQuery, (doc,)) print print i,"\t",linksRow[0][0] print "\t item: ",linksRow[0][1] print "\t occured ",freq,"times" i += 1 if freq > highestFreq['freq']: highestFreq['freq'] = freq highestFreq['docs'] = [linksRow[0][0]] elif freq == highestFreq['freq']: highestFreq['docs'].append(doc) print print "Total occurence of",phrase, "is", occurenceTotal, "times" print "Highest frequency: ", highestFreq['freq'], " times in: ", for i in range(len(highestFreq['docs'])): if i > 0: print "and" print highestFreq['docs'][i] print def nearQuery(self): print word1 = raw_input("Enter the first word: ") word2 = raw_input("Enter the second word: ") distance = input ("Enter the number of positions away you want to look: ") token1 = word1.lower() token1 = self.stemmer.stem(token1, 0, len(token1) - 1) token2 = word2.lower() token2 = self.stemmer.stem(token2, 0, len(token2) - 1) #Get the keys from both lists docs = [] try: docs1 = self.tokens[token1].keys() except: print word1, "does not seem to exist in our files. Please try a different word" print return try: docs2 = self.tokens[token2].keys() except: print word2, "does not seem to exist in our files. Please try a different word" print return #Perform a logical and on the elements of both lists. #Storing them in keys phraseDict = {} #Check which documents have both words for doc in docs1: if doc in docs2: doc1Pos = self.tokens[token1][doc] doc2Pos = self.tokens[token2][doc] #check which documents have the words within the allotted distance of each other freq = 0 for pos1 in doc1Pos: for pos2 in doc2Pos: if (pos2 - pos1 >= 0 - distance) and (pos2 - pos1 <= distance): freq += 1 if freq > 0: phraseDict[doc] = freq occurenceTotal = 0 i = 1 highestFreq = {'freq': 0, 'docs':[]} for doc in phraseDict.keys(): freq = phraseDict[doc] occurenceTotal += freq linksQuery = """ SELECT webPage.linkText, item.itemName FROM ( SElECT itemToWebPage.webPageId, itemToWebPage.itemId FROM itemToWebPage WHERE webPageId = ?) 
AS linkItem JOIN item ON item.itemId = linkItem.itemId JOIN webPage ON webPage.webPageId = linkItem.webPageId; """ linksRow = self.dbQuery(linksQuery, (doc,)) print print i,"\t",linksRow[0][0] print "\t item: ",linksRow[0][1] print "\t occured ",freq,"times" i += 1 if freq > highestFreq['freq']: highestFreq['freq'] = freq highestFreq['docs'] = [linksRow[0][0]] elif freq == highestFreq['freq']: highestFreq['docs'].append(doc) print print "Total occurence of",word1, "within ", distance, "positions of", word2, "was",occurenceTotal, "times" print "Highest frequency: ", highestFreq['freq'], " times in: ", for i in range(len(highestFreq['docs'])): if i > 0: print "and" print highestFreq['docs'][i] print def searchMenu(self): print print "-----------------------------------------------------------" print "\t Welcome to Stensland-ipedia!" print "\tWhere you can search to your hearts content!" print "-----------------------------------------------------------" print menu = True while menu: print "Choose the number corresponding to the query you would like to perform" print "---------------------------------------------------------------------" print "1.\tSingle token query." print "2.\tAND query." print "3.\tOR query." print "4.\t2-Token query." print "5.\tNear query." print "6.\tQuit" choice = raw_input("Enter your choice: ") if choice == '1': self.singleToken() elif choice == '2': self.andQuery() elif choice == '3': self.orQuery() elif choice == '4': self.phraseQuery() elif choice == '5': self.nearQuery() elif choice == '6': menu = False print "\n" else: print "That is not a thing I understand." print print print "Thank you for being my friend!" print def main(): #os.chdir('/Users/kristofer/comp_490/2lab') print "Preparing the search engine..." stenslandipedia = Searcher() stenslandipedia.searchMenu() if __name__ == "__main__": main()
UTF-8
Python
false
false
16,732
py
2
searchEngine.py
2
0.461511
0.447585
0
504
32.196429
115
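The nearQuery method in searchEngine.py above counts, per document, the position pairs of two tokens that lie within a chosen distance of each other; its inner double loop reduces to this small helper (extracted here for clarity, not part of the original file):

def count_near_pairs(positions1, positions2, distance):
    # A pair counts when the two tokens are at most `distance` positions apart
    return sum(1 for p1 in positions1 for p2 in positions2
               if abs(p2 - p1) <= distance)

print(count_near_pairs([1, 10], [3, 40], 2))  # 1: only the pair (1, 3) is close enough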
Jordan-Camilletti/Project-Euler-Problems
10,548,439,683,351
a0ecbd1fc6934fc37a81977a88e715df839602d8
cdbadf1e63f74911a145c88bb5422da5c1977904
/python/16. Power digit Sum.py
3812c1a7024e0528cabf03000b5626d08755a472
[]
no_license
https://github.com/Jordan-Camilletti/Project-Euler-Problems
30a93d6130e14e9382ee7e7d86b739ae676aaacf
b3f54004907a5ee4db00ad1e4989e4f239dcbd14
refs/heads/master
2021-06-03T19:19:19.612809
2020-05-01T04:28:37
2020-05-01T04:28:37
72,213,187
1
0
null
false
2020-02-23T04:49:10
2016-10-28T14:12:12
2020-02-21T02:20:09
2020-02-23T04:49:09
570
1
0
0
Python
false
false
"""2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26. What is the sum of the digits of the number 2^1000?""" str=str(2**1000) tot=0 for x in str: tot=tot+int(x) print(tot)
UTF-8
Python
false
false
189
py
59
16. Power digit Sum.py
58
0.624339
0.486772
0
8
22.625
68
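The digit-sum loop above collapses to a one-liner with a generator expression:

print(sum(int(d) for d in str(2 ** 1000)))  # 1366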
MonadWizard/python-basic
4,303,557,231,441
ead220a5b4b4901180a0eb2e18cfab39b08e632c
d87d83049f28da72278ca9aa14986db859b6c6d6
/basic/coreFundamental/tupleDemo/tupleBasic.py
b9ef6768c6dc72d8ef55ec75f859ae48dc8cfb63
[]
no_license
https://github.com/MonadWizard/python-basic
6507c93dc2975d6450be27d08fb219a3fd80ed64
624f393fcd19aeeebc35b4c2225bb2fe8487db39
refs/heads/master
2021-07-21T16:12:58.251456
2020-10-12T19:46:21
2020-10-12T19:46:21
223,625,523
1
0
null
false
2019-11-23T18:01:43
2019-11-23T17:14:21
2019-11-23T18:00:52
2019-11-23T18:01:42
0
0
0
3
HTML
false
false
""" A tuple is a sequence of immutable objects, therefore tuple cannot be changed. It can be used to collect different types of object. The objects are enclosed within parenthesis and separated by comma. Tuple is similar to list. Only the difference is that list is enclosed between square bracket, tuple between parenthesis and List has mutable objects whereas Tuple has immutable objects. """ data1=(101,981,1666,12,156,981,15) data2=(101,981,'abcd','xyz','m') data3=('aman','shekhar',100.45,98.2) data4=(101,981,'abcd','xyz','m') print(data1) print(data2) #### There can be an empty Tuple also which contains no object. Lets see an example of empty tuple. data5 = () print(data5) print(""" """) #### For a single valued tuple, there must be a comma at the end of the value. data6 = (10,) print(data6) print(""" """) #### Tuples can also be nested, it means we can pass tuple as an element to create a new tuple. print("data1 : ",data1) data7 = data1,(10,20,30,40) print("data1 : ",data1) print("data7 : ",data7) print(""" """) print("#### Accessing Tuple : ") print("data2 : ",data2) print(data2[0]) print(data2[0:2]) print(data2[-3:-1]) print(data2[0:]) print(data2[:2]) print(""" """) print("#### Elements in a Tuple : ") data=(1,2,3,4,5,10,19,17) print(data) print("""Data[0]=1=Data[-8] , Data[1]=2=Data[-7] , Data[2]=3=Data[-6] , Data[3]=4=Data[-5] , Data[4]=5=Data[-4] , Data[5]=10=Data[-3], Data[6]=19=Data[-2],Data[7]=17=Data[-1] """) print(""" """) #### Tuple can be added by using the concatenation operator(+) to join two tuples. print("data2 : ",data2) print("data3 : ",data3) print((data2 + data3)) print(""" """) #### repeating can be performed by using '*' operator by a specific number of time. print("data2 : ",data2) print("data2 * 2", (data2*2)) print("data3 : ",data3) print("data3 * 3 : ", data3*3) print(""" """) #### A subpart of a tuple can be retrieved on the basis of index. This subpart is known as tuple slice. print (data1[0:2]) print (data1[4]) print (data1[:-1]) print (data1[-5:]) print (data1) print(""" """) #### We can create a new tuple by assigning the existing tuple. print(data3) print(data4) print(data3+data4) print(""" """) del data3 #print(data3) #this tuple was deleted so seen error
UTF-8
Python
false
false
2,403
py
195
tupleBasic.py
165
0.620474
0.556388
0
114
18.95614
131
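The tutorial above states that tuples are immutable but never demonstrates what happens when a mutation is attempted; a small supplementary example:

t = (1, 2, 3)
try:
    t[0] = 99          # tuples do not support item assignment
except TypeError as err:
    print("immutable:", err)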
chinatszrn/momo_labeltool
1,614,907,752,572
49df60761f06ad4f9c40c681fa87cdbcb07e5f09
8eab8ba100521cedb0b8c62059b0000fe04da989
/mainwin.py
826f7760fe7f7c0d85a5276d4a6c6bd0c2cd0019
[]
no_license
https://github.com/chinatszrn/momo_labeltool
b3ffbdba2772050f830322c213611651fede3037
f3ddb9ff7e526cd224c674c84a7ee4f4e49850ed
refs/heads/master
2020-12-13T18:08:24.550696
2019-05-14T09:43:04
2019-05-14T09:43:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'mainwin.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_main_window(object):
    def setupUi(self, main_window):
        main_window.setObjectName("main_window")
        main_window.resize(1037, 784)
        self.centralwidget = QtWidgets.QWidget(main_window)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.canvas = Canvas(self.centralwidget)
        self.canvas.setInteractive(True)
        self.canvas.setObjectName("canvas")
        self.gridLayout_3.addWidget(self.canvas, 1, 0, 1, 1)
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.fixitem = QtWidgets.QCheckBox(self.widget)
        self.fixitem.setObjectName("fixitem")
        self.horizontalLayout.addWidget(self.fixitem)
        self.index = QtWidgets.QCheckBox(self.widget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.index.sizePolicy().hasHeightForWidth())
        self.index.setSizePolicy(sizePolicy)
        self.index.setObjectName("index")
        self.horizontalLayout.addWidget(self.index)
        self.control = QtWidgets.QCheckBox(self.widget)
        self.control.setChecked(True)
        self.control.setObjectName("control")
        self.horizontalLayout.addWidget(self.control)
        self.keypoint = QtWidgets.QCheckBox(self.widget)
        self.keypoint.setChecked(True)
        self.keypoint.setObjectName("keypoint")
        self.horizontalLayout.addWidget(self.keypoint)
        self.contour = QtWidgets.QCheckBox(self.widget)
        self.contour.setChecked(True)
        self.contour.setObjectName("contour")
        self.horizontalLayout.addWidget(self.contour)
        self.left_eyebrown = QtWidgets.QCheckBox(self.widget)
        self.left_eyebrown.setChecked(True)
        self.left_eyebrown.setObjectName("left_eyebrown")
        self.horizontalLayout.addWidget(self.left_eyebrown)
        self.right_eyebrown = QtWidgets.QCheckBox(self.widget)
        self.right_eyebrown.setChecked(True)
        self.right_eyebrown.setObjectName("right_eyebrown")
        self.horizontalLayout.addWidget(self.right_eyebrown)
        self.left_eye = QtWidgets.QCheckBox(self.widget)
        self.left_eye.setChecked(True)
        self.left_eye.setObjectName("left_eye")
        self.horizontalLayout.addWidget(self.left_eye)
        self.right_eye = QtWidgets.QCheckBox(self.widget)
        self.right_eye.setChecked(True)
        self.right_eye.setObjectName("right_eye")
        self.horizontalLayout.addWidget(self.right_eye)
        self.nose = QtWidgets.QCheckBox(self.widget)
        self.nose.setChecked(True)
        self.nose.setObjectName("nose")
        self.horizontalLayout.addWidget(self.nose)
        self.mouth_outter = QtWidgets.QCheckBox(self.widget)
        self.mouth_outter.setChecked(True)
        self.mouth_outter.setObjectName("mouth_outter")
        self.horizontalLayout.addWidget(self.mouth_outter)
        self.mouth_inner = QtWidgets.QCheckBox(self.widget)
        self.mouth_inner.setChecked(True)
        self.mouth_inner.setObjectName("mouth_inner")
        self.horizontalLayout.addWidget(self.mouth_inner)
        self.gridLayout_3.addWidget(self.widget, 0, 0, 1, 1)
        main_window.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(main_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1037, 18))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        self.menu_2 = QtWidgets.QMenu(self.menubar)
        self.menu_2.setObjectName("menu_2")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_window)
        self.statusbar.setObjectName("statusbar")
        main_window.setStatusBar(self.statusbar)
        self.dockWidget_2 = QtWidgets.QDockWidget(main_window)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.dockWidget_2.sizePolicy().hasHeightForWidth())
        self.dockWidget_2.setSizePolicy(sizePolicy)
        self.dockWidget_2.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.dockWidget_2.setObjectName("dockWidget_2")
        self.dockWidgetContents_2 = QtWidgets.QWidget()
        self.dockWidgetContents_2.setObjectName("dockWidgetContents_2")
        self.gridLayout = QtWidgets.QGridLayout(self.dockWidgetContents_2)
        self.gridLayout.setObjectName("gridLayout")
        self.file_list = FileList(self.dockWidgetContents_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.file_list.sizePolicy().hasHeightForWidth())
        self.file_list.setSizePolicy(sizePolicy)
        self.file_list.setObjectName("file_list")
        self.gridLayout.addWidget(self.file_list, 0, 0, 1, 1)
        self.dockWidget_2.setWidget(self.dockWidgetContents_2)
        main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.dockWidget_2)
        self.actionload = QtWidgets.QAction(main_window)
        self.actionload.setObjectName("actionload")
        self.actionConvert = QtWidgets.QAction(main_window)
        self.actionConvert.setObjectName("actionConvert")
        self.actionConvert1k = QtWidgets.QAction(main_window)
        self.actionConvert1k.setObjectName("actionConvert1k")
        self.actionBrezier = QtWidgets.QAction(main_window)
        self.actionBrezier.setObjectName("actionBrezier")
        self.menu.addAction(self.actionload)
        self.menu_2.addAction(self.actionConvert)
        self.menu_2.addAction(self.actionConvert1k)
        self.menu_2.addAction(self.actionBrezier)
        self.menubar.addAction(self.menu.menuAction())
        self.menubar.addAction(self.menu_2.menuAction())

        self.retranslateUi(main_window)
        QtCore.QMetaObject.connectSlotsByName(main_window)

    def retranslateUi(self, main_window):
        _translate = QtCore.QCoreApplication.translate
        main_window.setWindowTitle(_translate("main_window", "关键点标注"))
        self.fixitem.setText(_translate("main_window", "固定图片(F)"))
        self.fixitem.setShortcut(_translate("main_window", "F"))
        self.index.setText(_translate("main_window", "序号(S)"))
        self.index.setShortcut(_translate("main_window", "S"))
        self.control.setText(_translate("main_window", "控制点(Q)"))
        self.control.setShortcut(_translate("main_window", "Q"))
        self.keypoint.setText(_translate("main_window", "关键点(W)"))
        self.keypoint.setShortcut(_translate("main_window", "W"))
        self.contour.setText(_translate("main_window", "脸轮廓(E)"))
        self.contour.setShortcut(_translate("main_window", "E"))
        self.left_eyebrown.setText(_translate("main_window", "左眉毛(R)"))
        self.left_eyebrown.setShortcut(_translate("main_window", "R"))
        self.right_eyebrown.setText(_translate("main_window", "右眉毛(T)"))
        self.right_eyebrown.setShortcut(_translate("main_window", "T"))
        self.left_eye.setText(_translate("main_window", "左眼睛(Y)"))
        self.left_eye.setShortcut(_translate("main_window", "Y"))
        self.right_eye.setText(_translate("main_window", "右眼睛(U)"))
        self.right_eye.setShortcut(_translate("main_window", "U"))
        self.nose.setText(_translate("main_window", "鼻子(I)"))
        self.nose.setShortcut(_translate("main_window", "I"))
        self.mouth_outter.setText(_translate("main_window", "嘴外轮廓(O)"))
        self.mouth_outter.setShortcut(_translate("main_window", "O"))
        self.mouth_inner.setText(_translate("main_window", "嘴内轮廓(P)"))
        self.mouth_inner.setShortcut(_translate("main_window", "P"))
        self.menu.setTitle(_translate("main_window", "文件"))
        self.menu_2.setTitle(_translate("main_window", "数据处理"))
        self.actionload.setText(_translate("main_window", "载入文件夹"))
        self.actionConvert.setText(_translate("main_window", "生成137点"))
        self.actionConvert1k.setText(_translate("main_window", "生成1000点"))
        self.actionBrezier.setText(_translate("main_window", "生成贝塞尔关键点"))


from canvas import Canvas
from filelist import FileList
UTF-8
Python
false
false
9,099
py
9
mainwin.py
7
0.695371
0.686447
0
166
53.006024
108
radiofarmer/WavetableEditor
8,959,301,814,077
4f18f22d69b81b620c812ed4c63ba488b55ddf0d
89b5aa59f30f7417c19b8300a4da100fc6d8205b
/WavetableEditor/Wavetable.py
fc980a751cbad126aeb2cd0060953502cac9538f
[ "MIT" ]
permissive
https://github.com/radiofarmer/WavetableEditor
810bb639165efd0a6ef29f2a7e8fbc95a678e3f3
2cee8d773d24978ef3edc52a85b4285a48506f25
refs/heads/master
2023-03-11T08:00:15.309139
2021-02-26T09:22:35
2021-02-26T09:22:35
340,004,065
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np from WavetableEditor import IO from scipy import fft from scipy import interpolate import matplotlib.pyplot as plt import math def quantize_to_fraction(x, f): return np.floor(f * x) / f def make_shift_function(x, shift_step, max_shift=1., noise=0.): func = interpolate.interp1d(np.linspace(0, 1, x.shape[0]), x / np.max(x)) return lambda h: h + max_shift * quantize_to_fraction( func(np.linspace(0, 1., h.shape[0])) + np.random.random(h.shape[0]) * noise, shift_step ) def window(k, length): t = np.linspace(0, 1, length) fact = -np.abs((k - np.floor(k)) - 0.5) + 0.5 return (-np.cos(2 * np.pi * t) / 2 + 0.5) ** fact def oscillating_signs(harmonics): signs = np.ones(harmonics.shape[0]) for i, h in enumerate(harmonics): if h <= 1.: continue else: num = h / (h - np.floor(h)) if h != int(h) else h signs[i] *= 1. if num % 2 else -1. return signs def square_coeffs(harmonics): coeffs = np.zeros(harmonics.shape[0]) for i, h in enumerate(harmonics): if h == 0: coeffs[i] = 0 elif h % 2 == 0: coeffs[i] = 0 else: coeffs[i] = 1 / h return coeffs def saw_coeffs(harmonics): coeffs = np.zeros(harmonics.shape[0]) for i, h in enumerate(harmonics): if h == 0: coeffs[i] = 0 else: coeffs[i] = 1 / h return coeffs def zero_phase(h, *args, **kwargs): return np.zeros(h.shape[0]) class HarmonicSeries: def __init__(self, start, end, period, amp_func, phase_func=None, h_shift_func=None, **kwargs): if phase_func is None: phase_func = zero_phase if h_shift_func is None: h_shift_func = lambda x: x # Array of harmonic numbers self.harmonics = np.arange(start, end, period) h = h_shift_func(self.harmonics) self.harmonics = h # Array of amplitudes self._amplitudes = amp_func(self.harmonics) self._scale = 1. # Array of phases self._phases = phase_func(self.harmonics) self._step_size = period if "normalize" in kwargs: self.normalize(kwargs["normalize"]) def __mul__(self, other): """Multiplying the HarmonicSeries object multiplies the amplitudes""" self._scale = other return self def normalize(self, h_target): """Normalizes all amplitudes to the indicated harmonic""" if np.max(np.abs(self._amplitudes)) != 0.: self._amplitudes /= self._amplitudes[int(h_target - 1)] if np.max(np.abs(self._phases)) != 0.: self._phases /= self._phases[int(h_target - 1)] self._phases *= 2 * np.pi def evaluate(self, samprate, cycles=1, os_level=8, bandlimit=None, window=True): # t = np.arange(0, samprate * cycles) t = np.linspace(0., cycles * 2 * np.pi, samprate * cycles, endpoint=False) series = np.zeros(t.shape[0]) # adj = np.cos(self.harmonics - 1) ** 2 * (np.pi / (2 * self.harmonics[-1])) # gibbs adj = np.sinc(self.harmonics * np.pi / (2 * np.max(self.harmonics))) # sigma factor for a, p, h, g in zip(self.amplitudes, self.phases, self.harmonics, adj): if h <= bandlimit / os_level if bandlimit is not None else samprate / (4 * os_level): # Harmonics whose waveforms do not have an integer-number of cycles within # the rendered region are windowed to prevent aliasing. Increasing the number # of cycles allows more inharmonic frequencies to fit evenly into the wavetable. 
if not window or np.abs(h * cycles - np.round(h * cycles)) < 1e-3: partial = a * np.sin(float(h) * t + p) else: print("Harmonic {} does not fit into {} cycles".format(h, cycles)) wnd1 = np.concatenate([(np.cos(np.pi * t[:int(samprate)] / samprate) + 1) / 2, np.zeros(int(samprate * (cycles - 1)))]) wnd2 = np.concatenate([np.ones(int(samprate * (cycles - 1))), wnd1[int(samprate) - 1::-1]]) t_ext = np.arange(0, np.floor(samprate * cycles * (1 + h - np.floor(h)))) wave_full = np.sin(2 * np.pi * h * t_ext / samprate + p) wave1 = wave_full[:int(samprate * cycles)] wave2 = wave_full[len(wave_full) - int(samprate * cycles):] partial = a * (wave1 * wnd1 + wave2 * wnd2) series += partial * g return series @property def amplitudes(self): return self._amplitudes @property def phases(self): return self._phases @property def scale(self): return self._scale @property def max_harmonic(self): return self.harmonics[-1] @property def num_harmonics(self): return self.harmonics.shape[0] @property def step_size(self): return self._step_size class Waveform(): def __init__(self): self.series_ = [] def add_series(self, *args, **kwargs): new_series = HarmonicSeries(*args, **kwargs) self.append_series(new_series) def append_series(self, new_series): self.series_.append(new_series) def normalize(self): """Normalizes all series so that the (summed) maximum harmonic (not necessarily the fundamental) is one""" fund_sum = np.sum([np.max(s.amplitudes) for s in self.series_]) for s in self.series_: s.normalize(fund_sum) def generate_series(self, samprate, **kwargs): if "cycles" in kwargs: num_cycles = kwargs['cycles'] else: num_cycles = 1 length = int(samprate * num_cycles) sum_sines = np.zeros(length) # Sum sinusoids of all harmonic series for s in self.series_: s_wave = s.evaluate(samprate, **kwargs) s_wave /= np.max(np.abs(s_wave)) if np.max(s_wave) else 1. 
sum_sines += s_wave * s.scale # Normalize to the highest value if np.max(np.abs(sum_sines)) != 0.: self.waveform = sum_sines / np.max(np.abs(sum_sines)) # self.waveform = sum_sines / (sum_sines ** 2).sum() * num_cycles else: self.waveform = sum_sines return self.waveform def generate_ifft(self, samprate): freq_domain = np.zeros(samprate // 2) for s in self.series_: offset = s.harmonics[0] top_harmonic = s.max_harmonic + offset amp_interp_func = interpolate.interp1d(s.harmonics, s.amplitudes) phase_interp_func = interpolate.interp1d(s.harmonics, s.phases) a_resampled = amp_interp_func(np.arange(offset, top_harmonic, s.step_size)) p_resampled = phase_interp_func(np.arange(offset, top_harmonic, s.step_size)) # Pad the amplitude and phase arrays with zeros if the fundamental frequency of # the series is not the wavetable fundamental if top_harmonic >= samprate // 2: a = a_resampled[:math.floor(samprate // 2 - np.ceil(offset))] p = p_resampled[:math.floor(samprate // 2 - np.ceil(offset))] else: a = np.concatenate([a_resampled, np.zeros(math.floor(samprate // 2 - top_harmonic))]) p = np.concatenate([p_resampled, np.zeros(math.floor(samprate // 2 - top_harmonic))]) if offset > 1: a = np.concatenate([np.zeros(offset), a]) p = np.concatenate([np.zeros(offset), p]) # Interpolate amplitude and phase values s_complex = a + 1.0j * p # Pad with the DC offset value s_complex = np.concatenate([[0], s_complex]) # Add to the frequency-domain representation freq_domain = np.add(freq_domain, s_complex[:samprate // 2]) # Add negative frequencies freq_domain = np.concatenate([freq_domain, np.conj(freq_domain[::-1])]) * samprate self.waveform = fft.ifft(freq_domain) return self.waveform def from_sample(self, samples, samprate, cycles=1): fft_length = min(samprate * cycles, samples.shape[0]) transform = fft.fft(samples[:fft_length]) amps = np.real(transform) phases = np.imag(transform) # Shift the fundamental frequency to bin [cycles] clip_region = np.argmax(np.abs(transform)) transform = np.concatenate([np.zeros(cycles), transform[clip_region:], np.zeros(clip_region)]) self.waveform = transform return self.waveform class Wavetable(): def __init__(self): self.waves_ = [] def freq_spec(x): return np.abs(fft.fft(x)) def plot_freqs(x, freq_max=None): if freq_max is None: freq_max = x.shape[0] fourier_transform = fft.fft(x) plt.plot(np.abs(fourier_transform[:freq_max])) plt.show() def plot_fft(x, freq_max=None): if freq_max is None: freq_max = x.shape[0] fourier_transform = fft.fft(x) plt.plot(np.real(fourier_transform[:freq_max])) plt.plot(np.imag(fourier_transform[:freq_max])) plt.show() if __name__ == "__main__": wt1 = Waveform() wt1.add_series(1, 2, 1, saw_coeffs) # wave = wt1.generate_series(48000, cycles=3) # plt.plot(np.abs(fft.fft(wave)[:100])) wt2 = Waveform() wt2.add_series(1, 200, 1, saw_coeffs) # wave = wt2.generate_series(48000, cycles=3) # wave = np.tile(wave, 10) # plt.plot(np.abs(fft.fft(wave)[:100])) # plt.show() IO.export_mipmap([wt1, wt2], "", "Sine-Saw", 2 ** 14, cycles_per_level=1)
UTF-8
Python
false
false
9,839
py
5
Wavetable.py
4
0.568249
0.55402
0
286
33.402098
114
Saumay-Agrawal/GSOC-Explorer
8,443,905,714,839
9c81268fdb2348ddbbb8e1607b1e4af6810bfc69
b39074034e46a57753cd22a9ea147dafc158c26d
/data-extractor.py
c48f3f82cf9cf49af0d647d4f73a39aef1d878da
[]
no_license
https://github.com/Saumay-Agrawal/GSOC-Explorer
2590aa6bea9f792633cb51ed3983840df5ac6d3a
6c82c7b9ecdede5d13c87fcae621a2731cbf94ef
refs/heads/master
2020-04-10T23:01:28.683316
2019-02-22T09:51:26
2019-02-22T09:51:26
161,339,391
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pymongo
import json

client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['gsoc-data']
col = db['flat_data']

file = open('gsoc-data.json', mode='w', encoding='utf-8')
file.write('[')
count = 0
for doc in col.find():
    del doc['_id']
    if count > 0:
        file.write(',')  # separator goes *between* documents so the JSON stays valid
    file.write(json.dumps(doc, sort_keys=True))
    count += 1
file.write(']')
file.close()
print('{} documents written to the file.'.format(count))
UTF-8
Python
false
false
448
py
10
data-extractor.py
5
0.654018
0.636161
0
20
21.45
61
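Hand-writing the brackets and separators, as data-extractor.py does, is easy to get wrong; when the documents fit in memory, collecting them into a list and calling json.dump produces valid JSON without any separator bookkeeping. A sketch:

import json

docs = [{"org": "example", "year": 2018}]  # stand-in for the cleaned Mongo documents
with open("gsoc-data.json", "w", encoding="utf-8") as f:
    json.dump(docs, f, sort_keys=True)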
vasu4982/flask-apps-with-blueprints
4,844,723,153,112
8b762bea4713ac191cd00c66a508ee9d6604a3fa
70d59ad4466a6eea0ea4bca03a7786921476353d
/blueprints/__init__.py
dbe334a55577633f3ae71d63ea77888fedd2a5f4
[]
no_license
https://github.com/vasu4982/flask-apps-with-blueprints
0b295e40f6ef89216ff0d49c1a98e15c70b5bcc1
b8cab7683100f87b813abdbac09ac9695c8b93a6
refs/heads/master
2021-09-13T10:21:44.081675
2018-04-28T05:49:40
2018-04-28T05:49:40
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import Flask from blueprints.home.views import home from blueprints.about.views import about app = Flask(__name__) app.register_blueprint(home, url_prefix='/home') app.register_blueprint(about, url_prefix='/about')
UTF-8
Python
false
false
227
py
2
__init__.py
1
0.77533
0.77533
0
7
31.428571
50
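The package __init__ above imports `home` and `about` Blueprint objects from view modules that are not part of this record; each presumably looks something like the following hypothetical sketch:

from flask import Blueprint

home = Blueprint('home', __name__)

@home.route('/')
def index():
    # Served at /home/ because of the url_prefix set during registration
    return 'home page'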
jan-g/psh
9,680,856,329,124
0a312f88f484230dea3ccbf1119bdaf7bfa68393
39729564419ed0c233d2fe2c215214e52e3c4a10
/test/test_model_case.py
0bbd37b0577985b71e84b5dd314e113c55fec4a9
[ "Apache-2.0" ]
permissive
https://github.com/jan-g/psh
b4ec8aae7e394ccbf85b97fa0e71482296e03974
c2641c9d2d7051dacb41474123889dd04bdd2989
refs/heads/master
2020-09-14T16:22:57.988393
2019-12-07T22:41:00
2019-12-07T22:41:00
223,183,178
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pytest from psh.model import Word, Id, CommandSequence, Command, Case, VarRef, ConstantString from psh.glob import STAR from psh.local import make_env w = lambda w: Word([Id(w)]) a = Word([VarRef(Id("a"))]) echo = lambda out: CommandSequence([Command([Word([Id("echo")]), Word([ConstantString(out)])])]) x = w("x") cmd = lambda *cs: CommandSequence([Command([*cs])]) star = Word([STAR]) @pytest.mark.parametrize(("cmd", "variable", "expected"), ( (CommandSequence([Case(a)]), "", ""), (CommandSequence([Case(a).with_case(x, echo("foo"))]), "", ""), (CommandSequence([Case(a).with_case(x, echo("foo"))]), "y", ""), (CommandSequence([Case(a).with_case(x, echo("foo"))]), "x", "foo"), (CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "", "bar"), (CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "y", "bar"), (CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "x", "foo"), ), ids=lambda x: x.replace(" ", "_") if isinstance(x, str) else x) def test_basic(cmd, variable, expected): env = make_env() env["a"] = variable assert cmd.evaluate(env) == expected
UTF-8
Python
false
false
1,206
py
31
test_model_case.py
30
0.615257
0.615257
0
28
42.071429
100
andrisole92/VectorAI
18,717,467,489,324
946540bc473e1d5b42db8ea31ddd242198c3f070
b18b340f7d27b349ed8f344f1827331d1812249b
/src/__init__.py
9f678db9c3b941ae8e79443bb0cec983662d336c
[]
no_license
https://github.com/andrisole92/VectorAI
42917758cb305d562a48b7f64ab18c824ab487fb
03e6a8e5d6ff76a03c9108f6f507f47dfe7fd04f
refs/heads/master
2021-01-05T22:09:46.285959
2020-02-22T18:33:26
2020-02-22T18:33:26
241,149,928
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from src.db.Engine import Engine from src.db.SessionManager import SessionManager
UTF-8
Python
false
false
83
py
14
__init__.py
12
0.843373
0.843373
0
2
40
48
iaasci-ibm/python-zvm-sdk
10,857,677,372,748
a320a32d2f4c4953df50939aef8d1c8cdba90914
516e5ad7aa37dee9c6f6602dc63b66bf3d361f37
/zvmsdk/tests/unit/base.py
a0bd874f1c8a5a8a3b4e12410bc52544d6e37d65
[ "CC-BY-4.0", "Apache-2.0" ]
permissive
https://github.com/iaasci-ibm/python-zvm-sdk
3eec5b82d5e28c19e5213626102a237070e54e7c
c39d4522b2311da0bb06910b6bb3b20ad32a8ae4
refs/heads/master
2022-02-13T04:37:50.098878
2022-01-28T18:17:44
2022-01-28T18:17:44
227,066,210
0
2
Apache-2.0
true
2021-04-09T00:33:39
2019-12-10T08:26:36
2021-04-08T15:32:19
2021-04-09T00:33:38
6,790
0
0
0
C
false
false
# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from zvmsdk import config CONF = config.CONF def set_conf(section, opt, value): CONF[section][opt] = value class SDKTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # This can be used to set up confs before running all cases super(SDKTestCase, cls).setUpClass() cls.old_db_dir = CONF.database.dir set_conf('database', 'dir', '/tmp/') set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') set_conf('image', 'sdk_image_repository', '/tmp/') set_conf('zvm', 'namelist', 'TSTNLIST') @classmethod def tearDownClass(cls): super(SDKTestCase, cls).tearDownClass() # Restore the original db path CONF.database.dir = cls.old_db_dir def setUp(self): super(SDKTestCase, self).setUp() def _fake_fun(self, value=None): return lambda *args, **kwargs: value
UTF-8
Python
false
false
1,502
py
308
base.py
121
0.665113
0.659787
0
50
29.04
78
vineetsingh065/30_days_of_problem_solving
1,082,331,796,878
9456c4ad4b3f9f67af05bf8cbf07f9a27872d3ab
b8f68d68c49a191b06d0c83ebd7be0a7bde0cc28
/day_6_total_set_bits.py
1f3292920266b522d40a6053ead627093e6d5910
[]
no_license
https://github.com/vineetsingh065/30_days_of_problem_solving
485f6033e5785ae365d14728cbadd5f44158afb0
40014080b135378359742b9493334f4079309862
refs/heads/master
2023-08-25T01:00:53.208219
2021-10-10T15:23:37
2021-10-10T15:23:37
403,340,800
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" You are given a number N. Find the total count of set bits for all numbers from 1 to N(both inclusive). Example 1: Input: N = 4 Output: 5 Explanation: For numbers from 1 to 4. For 1: 0 0 1 = 1 set bits For 2: 0 1 0 = 1 set bits For 3: 0 1 1 = 2 set bits For 4: 1 0 0 = 1 set bits Therefore, the total set bits is 5. """ def countSetBits(n): count=0 i=1 while(i<=n): i=i*2 q=(n+1)//i r=(n+1)%i t=q*(i//2) if r>(i//2): t+=r-i//2 count+=t return count if __name__=='__main__': n = 17 print(countSetBits(n))
UTF-8
Python
false
false
613
py
14
day_6_total_set_bits.py
14
0.522023
0.461664
0
36
16.027778
103
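countSetBits above totals the ones in each bit column of 1..N in closed form: for column width i = 2, 4, 8, ..., each full block of i numbers contributes i/2 ones, plus a partial-block remainder. An O(n log n) brute-force reference makes the formula easy to cross-check:

def count_set_bits_brute(n):
    # Direct count: sum the popcounts of every number from 1 to n
    return sum(bin(i).count("1") for i in range(1, n + 1))

print(count_set_bits_brute(4))   # 5, the worked example in the docstring
print(count_set_bits_brute(17))  # 35, matching countSetBits(17)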
kongqiuchuipin/UsedCar
15,513,421,918,185
640db27271cd5ac733f759af392877446d731314
22e585820e19df8d28eb164db1b02d6832e09f63
/youxin/youXin.py
476bdfca5fe65619052f8f4322745c2f12807e9b
[]
no_license
https://github.com/kongqiuchuipin/UsedCar
f15100ff8f62790fe67835f72a7eb3914053d9ab
b6938c88b2edb6c7d783946e41aece5304b83235
refs/heads/master
2021-01-25T09:44:36.910714
2018-03-01T07:52:04
2018-03-01T07:52:04
123,316,549
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# _*_ coding:utf-8 _*_
from link import Link, get_all_city_name
from info import info
from pymongo import MongoClient
from shelve_method_ import shelve_open, shelve_add, shelve_save


def all_city_links():
    all_city = shelve_open('all_city_name')  # if the city list is already cached
    if not all_city:
        all_city = get_all_city_name()  # if it is not, fetch it
        shelve_save('all_city_name', all_city)
    return all_city


def get_link():
    all_city = all_city_links()
    done = shelve_open('city_done_link')  # cities already scraped
    doing = [i for i in all_city if i not in done]
    for city in doing:
        if city not in done:  # not yet scraped
            link_spider = Link(city)
            link_spider.get_links()
            shelve_add('city_done_link', city)  # record progress here; the next run resumes after this point


def get_info():
    # scraping for all cities
    collection = MongoClient()['youXin']['all']
    city_links = set(shelve_open('all_link'))
    city_invalid_links = set(shelve_open('invalid_link'))
    if city_links:  # if the file contains links
        in_database = {i['链接'][8:] for i in collection.find()}
        l_i_d = len(in_database)
        link_for_catch = city_links - in_database - city_invalid_links
        l_f_c = len(link_for_catch)
        print('in database: {}, to scrape: {}, {} in total'.format(l_i_d, l_f_c, l_i_d + l_f_c))
        for i, link in enumerate(link_for_catch):
            print(i + l_i_d, 'of', l_i_d + l_f_c)
            # pages that no longer exist are also counted in this total
            doc = info(link)
            if doc:
                collection.insert_one(doc)
    print('done'.center(30, '*'))


if __name__ == '__main__':
    # get_link()
    get_info()
UTF-8
Python
false
false
1,714
py
15
youXin.py
14
0.574742
0.572165
0
49
30.673469
72
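youXin.py above checkpoints crawl progress through shelve helpers (shelve_open/shelve_add come from the repo's shelve_method_ module, which is not shown here). The same resumable-crawl idea expressed with only the standard library, as a hedged sketch:

import shelve

def pending_items(all_items, state_file="progress_state"):
    # Items already marked done in the shelf are skipped on the next run
    with shelve.open(state_file) as db:
        done = set(db.get("done", []))
    return [item for item in all_items if item not in done]

def mark_done(item, state_file="progress_state"):
    with shelve.open(state_file, writeback=True) as db:
        db.setdefault("done", []).append(item)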
mberrens/IceContinuum
6,717,328,890,219
42171544d81c325c031f89fed30c8d8cf9e89c6a
c65ac5b9fd6a679a9837b455b0eaeb74171b66bc
/netcdfstuff/loader.py
87fe3751a1b39c54e44afb69d04da66e6c926af5
[]
no_license
https://github.com/mberrens/IceContinuum
7705b01f9762647ff5e3d0f808108d0f6ed64514
10f2ed23cb93c1e212aef4689e90d8ff4fda0225
refs/heads/master
2021-05-06T11:33:12.599154
2019-08-12T02:42:05
2019-08-12T02:42:05
114,292,998
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# .. Built-in modules import pickle import numpy as np import scipy.io as spio from netCDF4 import Dataset, num2date, date2index from scipy.io.netcdf import NetCDFFile as DS from scipy.interpolate import interp2d import array def load_ssp_nu(datafile, nu): # Load the moments from a netcdf file & return the unbundled arrays dnu = nu[1] - nu[0] with DS(datafile,'r') as nc: inu = np.where(np.logical_and(\ nc.variables['wnum_list'][:] > nu[0]-2*dnu, nc.variables['wnum_list'][:] < nu[-1]+2*dnu))[0] Npmomarray = nc.variables['Npmomarray'][:,inu].astype('int32') w0_mesh = nc.variables['w0_mesh'][:,inu].astype('float64') qext_mesh = nc.variables['qext_mesh'][:,inu].astype('float64') reff = nc.variables['reff_list'][:].astype('float64'); reff = reff[:,0] wnum_vec = nc.variables['wnum_list'][inu].astype('float64') # wnum_mesh = nc.variables['wnum_mesh'][:,inu].astype('float64') # reff_mesh = nc.variables['reff_mesh'][:,inu].astype('float64') # Set up empty output arrays Nnu = nu.size Nreff = reff.size qext = np.zeros((Nreff, Nnu)) w0 = np.zeros((Nreff, Nnu)) NPmom_fp = np.zeros((Nreff, Nnu)) # Interpolate qext, w0, get an interpolated number of moments! fq = interp2d(reff, wnum_vec, qext_mesh.T) fw = interp2d(reff, wnum_vec, w0_mesh.T) fNP = interp2d(reff, wnum_vec, Npmomarray.T) for i in range(Nreff): qext[i,:] = fq(reff[i], nu)[:,0] w0[i,:] = fw(reff[i], nu)[:,0] NPmom_fp[i,:] = fNP(reff[i], nu)[:,0] # Use floor so we never interpolate between a moment and 0. NPmom = np.floor(NPmom_fp).astype(int) NPmom_max = np.max(NPmom) pmomarray = nc.variables['pmomarray'][:,inu,:NPmom_max] pmomarray = pmomarray.astype('float64') # Loop over all the moments to do the same Pmom = np.zeros((Nreff, Nnu, NPmom_max)); for j in range( NPmom_max): f = interp2d(reff, wnum_vec, pmomarray[:,:,j].T) for i in range(Nreff): Pmom[i,:,j] = f(reff[i], nu)[:,0] return (NPmom, Pmom, reff, w0, qext) def getsolarbeam_IR (wnum=None, solarbeam_IR_file=None): #print solarbeam_IR_file kurucz = np.loadtxt(solarbeam_IR_file) #'kurucz.dat') beam = np.interp(wnum,kurucz[:,0],kurucz[:,1])/1000; return (beam) # # # # # # # # LOAD SURFACE ALBEDO # # # # # # # # # # # # # # def get_surface_albedo_from_file(surfEmissDataFile): albedoData = np.loadtxt(surfEmissDataFile, comments='%') nu_surf_albedo = albedoData[:, 1] surf_albedo = 1-albedoData[:, 2] return nu_surf_albedo, surf_albedo def get_surface_albedo_IR(wnum = None, surfEmissDataFile = None): Mammoth = np.loadtxt(surfEmissDataFile) #'Mammoth.dat') emissivity = np.interp(wnum, Mammoth[:,1], Mammoth[:,2]) emissivity[emissivity>1.] = 1. emissivity[emissivity<0.] = 0. #for i in range(len(emissivity)): # if emissivity[i]>1.: # emissivity[i] = 1. # elif emissivity[i]<0.: # emissivity[i] = 0. #emissivity = min(interp1(Mammoth(:,2),Mammoth(:,3),wnum,'linear','extrap'),1); #emissivity = max(emissivity,0); surface_albedo = 1. 
- emissivity; return surface_albedo # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def load_od_gas(odfile): ''' Purpose: Load in the matlab generated file because python is too slow for cubic interp and gives small differences But the python way is saved for reference in extras.py ''' odinfo = spio.loadmat(odfile) date = odinfo['date'] nu = odinfo['nu'][0] rads = odinfo['rads'] rad_above = odinfo['rad_above'][0] tsc = odinfo['tsc'] view_angle = odinfo['view_angle'] date_above_bef = odinfo['date_above_bef'] date_above_aft = odinfo['date_above_aft'] Bctc = odinfo['Bc_tsc'] dt_dtau = odinfo['dt_dtau'] # radClr = odinfo['radClr']; # odlyr = odinfo['odlyr'] # print('Loaded od file ' + odfile) return date, view_angle, nu, rads, tsc, rad_above, \ date_above_bef, date_above_aft, Bctc, dt_dtau # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def load_profiles(prof_file): ''' Load in the profile file ''' # .. Load in the profile with Dataset(prof_file, 'r', format='NETCDF4_CLASSIC') as nc: z = np.double(nc['z'][:].data) P = np.double(nc['P'][:].data) T = np.double(nc['T'][:].data) h2o = np.double(nc['h2o'][:].data) return z, P, T, h2o # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def load_surface_temperatures(metfname, beg_datetime, end_datetime): ''' Load the surface temperatures from a file ''' with Dataset(metfname, 'r', format= "NETCDF4") as nc: itime = np.logical_and( \ date2index(beg_datetime, nc.variables['time'], select='after'), \ date2index(end_datetime, nc.variables['time'], select='before')) surf_time = num2date(nc.variables['time'][itime], nc.variables['time'].units) surf_temp = nc.variables['temp_mean'][itime] + 273.15 #ikeep = np.logical_and(np.where(surf_time>=beg_datetime)[0], # np.where(surf_time<=end_datetime)[0]) return surf_time, surf_temp # surf_time[ikeep], surf_temp[ikeep] # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def load_surface_temperatures_for_datetime(metfname, thisdatetime, z, T): ''' Load the surface temperatures from a file ''' with Dataset(metfname, 'r', format= "NETCDF4") as nc: itime = date2index(thisdatetime, nc.variables['time'], select='after') surf_time = num2date(nc.variables['time'][:], nc.variables['time'].units) dd = surf_time[itime-1:itime+1] - thisdatetime dmin = [d.days*24*60 + d.seconds/60 for d in dd] wt = np.flipud(np.abs(dmin)) wt = wt/np.sum(wt) surf_temp = wt[0] * nc.variables['temp_mean'][itime-1] + \ wt[1] * nc.variables['temp_mean'][itime] + 273.15 i1km = np.where(z<=1)[0][-1] Tnew = np.zeros(i1km) Tnew[0] = surf_temp # + 0 m Tnew[1:i1km] = np.interp(z[1:i1km], [z[0], z[i1km]], [Tnew[0], T[i1km]]) return Tnew # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def get_prof_obsolete(prof_dir, prof_file, dir_lblrtm, z_toa, bwn, ewn): ''' Load in the profile file ''' # .. Load in the profile nc = Dataset(prof_dir + prof_file, 'r', format='NETCDF4_CLASSIC') zm = nc['z'][:] nlyr_toa = (np.abs(zm - z_toa)).argmin() itop = nlyr_toa + 1 # .. Units for ozone are jchar = C for g/kg units = dict({('tm','km'), ('pm','mb'), ('h2o','ppmv'), ('co2','ppmv'), ('o3','gm_kg'), ('hno3','ppmv')}) # .. Set prof values from prof_file, as well as inputs bwn and ewn # viewing angle set to zenith. To output optical depths from LBLRTM, # iemit is set to 0 and imrg to 1. 
prof = { "v1": bwn, "v2": ewn, "zm": zm[:itop], "pm": nc['P'][:itop], "tm": nc['T'][:itop], "h2o": nc['h2o'][:itop], "co2": nc['co2'][:itop], "o3": nc['o3'][:itop], "hno3": nc['hno3'][:itop], "f11": nc['f11'][:itop], "f12": nc['f12'][:itop], "f113": nc['f113'][:itop], "units": units, "zangle": 0, "fnametape5": dir_lblrtm + "TAPE5" , "model": 0, "modelExtra": 3, "iod": 0 , "iatm": 1, "ipunch": 1, "iemit": 0, "imrg": 1, } # .. Add this later? # if do_refl # prof.surf_refl = surf_refl ; return prof, nlyr_toa # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def get_prof_pickle(prof_dir, prof_file, dir_lblrtm, z_toa, bwn, ewn): ''' Load in the profile file that was pickled. ''' # .. Load in the profile pstuff = pickle.load(open(prof_dir + prof_file, 'rb')) zm = pstuff.zm nlyr_toa = (np.abs(zm - z_toa)).argmin() itop = nlyr_toa + 1 # .. Set prof values from prof_file, as well as inputs bwn and ewn # viewing angle set to zenith. To output optical depths from LBLRTM, # iemit is set to 0 and imrg to 1. prof = { "v1": bwn, "v2": ewn, "zm": pstuff.zm[:itop], "pm": pstuff.pm[:itop], "tm": pstuff.tm[:itop], "h2o": pstuff.h2o[:itop], "co2": pstuff.co2[:itop], "o3": pstuff.o3[:itop], "hno3": pstuff.hno3[:itop], "f11": pstuff.f11[:itop], "f12": pstuff.f12[:itop], "f113": pstuff.f113[:itop], "units": pstuff.units, "zangle": 0, "fnametape5": dir_lblrtm + "TAPE5" , "model": 0, "modelExtra": 3, "iod": 0 , "iatm": 1, "ipunch": 1, "iemit": 0, "imrg": 1, } # .. Add this later? # if do_refl # prof.surf_refl = surf_refl ; return prof, nlyr_toa # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def load_profiles_pickle(prof_file): pstuff = pickle.load(open(prof_file, 'rb')) return pstuff.zm, pstuff.pm, pstuff.tm, pstuff.h2o def load_cloud_layers(cloud_layer_file, z, thisdatetime): with Dataset(cloud_layer_file, 'r', format = "NETCDF4") as nc: cloud_mask = nc['cloud_mask'][:].T height = nc['range'][:].data alt0 = nc['altitude'][0].data # .. Get the times mask_date = num2date(nc['time'][:], nc['time'].Description) imask = (np.abs(mask_date - thisdatetime)).argmin() # 0=>no cloud, 1=>ice, 2=>liquid, 3=>unknown, probably ice # Ice below 120 m is probably an artifact if there is not ice above, # so if there is cloud below 120 m, we will ignore it, unless # there is also cloud betwen 120 and 200 m icld_all = cloud_mask[:,imask].data != 0 # .. If no cloud, set variables and return them now if not np.any(icld_all): cloud_layer = array.array('i') has_ice = array.array('i') has_liq = array.array('i') return cloud_layer, has_liq, has_ice alt = alt0 + height[icld_all] mask = cloud_mask[icld_all,imask].data alt_liq = alt[mask==2] alt_ice = alt[np.logical_or(mask==1, mask==3)] if not np.all(alt[alt<=200] >= 120): alt_ice = alt_ice[alt_ice>=120] alt_liq = alt_liq[alt_liq>=120] alt = alt[alt>=120] # .. 
To make things easy, if there is any cloud within # a model atmospheric layer, set the entire layer to cloudy # Remember we are going top down # Only try the range with cloud (+/- 30 m) alt = alt/1000; alt_liq = alt_liq/1000; alt_ice = alt_ice/1000 iz1 = np.where(z >= alt[-1])[0] if np.any(iz1): iz1 = iz1[-1] else: iz1 = 0 iz2 = np.where(z <= alt[0])[0] if np.any(iz2): iz2 = iz2[0]+1 else: iz2 = len(z)-3 if iz2 >= len(z)-2: iz2 = len(z)-3 if iz2 < iz1: print('pause here!') Npossible = iz2 - iz1 + 1 cloud_layer = array.array('i',(0 for i in range(Npossible))) has_ice = array.array('i',(0 for i in range(Npossible))) has_liq = array.array('i',(0 for i in range(Npossible))) i = 0 for iz in range(iz1,iz2+1): if np.any((alt > z[iz+1]) * (alt <= z[iz])): cloud_layer[i] = iz if np.any((alt_liq > z[iz+1]) * (alt_liq <= z[iz])): has_liq[i] = 1 if np.any((alt_ice > z[iz+1]) * (alt_ice <= z[iz])): has_ice[i] = 1 i += 1 cloud_layer = cloud_layer[:i] has_ice = has_ice[:i] has_liq = has_liq[:i] return cloud_layer, has_liq, has_ice
UTF-8
Python
false
false
12,736
py
247
loader.py
3
0.498822
0.47566
0
384
32.161458
83
animebing/course
13,537,736,926,573
0d1b2ecfd620c52d2af83afed391f6ad09391e6d
acd55085d1004e62c8b10fd5255779ee3d4a00ec
/deep_learning/cs231n/assignment1/cs231n/classifiers/PCA.py
3aec20e1fb8f60f3f7f1793cc8891d8aee84c738
[]
no_license
https://github.com/animebing/course
129e8084787e64dd9d373b6fee3ac3e5874281c9
c079dfa498a24c6aca5660105caaee24de97ff60
refs/heads/master
2021-01-02T08:25:12.961247
2017-08-01T16:28:18
2017-08-01T16:28:18
99,007,085
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np


def PCA(train, val, test, output_dim):
    n, dim = train.shape
    cov_matrix = np.dot(train.T, train) / n
    # eigh suits the symmetric covariance matrix; project onto the
    # eigenvectors (v), taking the columns with the largest eigenvalues (w)
    w, v = np.linalg.eigh(cov_matrix)
    components = v[:, np.argsort(w)[::-1][:output_dim]]
    new_train = train.dot(components)
    new_val = val.dot(components)
    new_test = test.dot(components)
    return new_train, new_val, new_test
UTF-8
Python
false
false
289
py
76
PCA.py
25
0.647059
0.647059
0
8
35.25
41
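A hedged usage sketch of the PCA function above on random data; it assumes the inputs are already mean-centered, which the function itself does not do:

import numpy as np

rng = np.random.default_rng(0)
train = rng.normal(size=(100, 10))
val = rng.normal(size=(20, 10))
test = rng.normal(size=(20, 10))

new_train, new_val, new_test = PCA(train, val, test, output_dim=3)
print(new_train.shape, new_val.shape, new_test.shape)  # (100, 3) (20, 3) (20, 3)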
Mariia-Pinda/codereview
10,539,849,752,399
446836c615fc7d5fd9159b22a00e42ed9224b429
ff8040dd6518918f60f5c52147fa02e0ab7c1d57
/pindamv/first/task3.py
68090e2d498a44ede96ab7e8efe20c650ef3b517
[]
no_license
https://github.com/Mariia-Pinda/codereview
ade55d0a36b647d19389224d8719ee340142687d
9d04bcbf6ad3f140a3b19858d87c6d590c0c97fd
refs/heads/master
2020-11-27T06:13:46.918788
2019-12-22T10:15:00
2019-12-22T10:15:00
229,334,952
0
0
null
true
2019-12-20T20:55:29
2019-12-20T20:55:28
2019-12-20T20:36:50
2019-12-20T20:36:48
0
0
0
0
null
false
false
'''
Given two real numbers, print the average of all integers
that lie between them.
'''
import math

the_first_float = float(input('enter the first float: '))
the_second_float = float(input('enter the second float: '))
# integers between the two reals: round the lower bound up, the upper bound down
range_1 = math.ceil(min(the_first_float, the_second_float))
range_2 = math.floor(max(the_first_float, the_second_float))
numbers = list(range(range_1, range_2 + 1))
middle = sum(numbers) / len(numbers)
print(middle)
UTF-8
Python
false
false
529
py
4
task3.py
4
0.700229
0.686499
0
12
35.5
112
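As a worked example of the intended behavior: for inputs 2.3 and 6.7 the integers between them are 3, 4, 5 and 6, so the program prints (3 + 4 + 5 + 6) / 4 = 4.5.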
openshift-eng/art-tools
5,411,658,831,104
4f18a9ae97e24640c77f471f7adb1376d96cb324
b73c58adc20bde3cf0f212012f3fc76d39ea2a6f
/elliott/functional_tests/test_find_builds.py
95a08b1508d594058f9af55d88a487c38e9d06b8
[ "LGPL-3.0-only", "Apache-2.0" ]
permissive
https://github.com/openshift-eng/art-tools
a4cef268cb7a8c0f74c96db787a72f41dcef7255
d24b5c78337d1fe73fa83e8d099809cbe9d9ed42
refs/heads/main
2023-09-01T17:32:25.906575
2023-09-01T15:36:09
2023-09-01T15:36:09
144,891,479
1
5
Apache-2.0
false
2023-09-14T13:35:34
2018-08-15T18:52:23
2023-08-11T13:00:23
2023-09-14T13:35:33
7,209
2
12
7
Python
false
false
import unittest
import subprocess

from functional_tests import constants

# This test may start failing once this version is EOL and we either change the
# ocp-build-data bugzilla schema or all of the non-shipped builds are garbage-collected.
version = "4.3"


class FindBuildsTestCase(unittest.TestCase):
    def test_find_rpms(self):
        out = subprocess.check_output(
            constants.ELLIOTT_CMD
            + [
                "--assembly=stream",
                f"--group=openshift-{version}",
                "find-builds",
                "--kind=rpm",
            ]
        )
        self.assertIn("may be attached to an advisory", out.decode("utf-8"))

    def test_find_images(self):
        out = subprocess.check_output(
            constants.ELLIOTT_CMD
            + [
                f"--group=openshift-{version}",
                "-i",
                "openshift-enterprise-cli",
                "find-builds",
                "--kind=image",
            ]
        )
        self.assertIn("may be attached to an advisory", out.decode("utf-8"))

    def test_change_state(self):
        """To attach a build to an advisory, it will be attempted to set the advisory
        to NEW_FILES. This advisory is already SHIPPED_LIVE, and the attempted change
        should fail"""
        command = constants.ELLIOTT_CMD + [
            f'--group=openshift-{version}', '--images=openshift-enterprise-cli',
            'find-builds', '--kind=image', '--attach=57899'
        ]
        result = subprocess.run(command, capture_output=True)
        self.assertEqual(result.returncode, 1)
        self.assertIn('Cannot change state', result.stdout.decode())


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
1,649
py
318
test_find_builds.py
271
0.602183
0.596119
0
48
33.354167
112
akshaymawale/code
5,205,500,388,564
a4b35e0d92b33ca693ea2d8b0a54b26c67f89fe8
b7073b9ded97b44e1df00dc17df66166778eb25f
/pull.py
0115ed58c8457ae302f5f7a3730e48f7c54ffd91
[]
no_license
https://github.com/akshaymawale/code
0efd10821bfbe91ae76372209c28f4bc72792721
13ad7406873dd553a98267969b91f226569f9cbd
refs/heads/master
2020-12-02T02:43:15.415456
2019-12-30T09:07:46
2019-12-30T09:07:46
230,861,729
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
print(" pull from local ")
UTF-8
Python
false
false
27
py
1
pull.py
1
0.666667
0.666667
0
1
26
26
jtdub/hier_config
14,413,910,285,754
915514efdb5ab7f189e2e61ea0e4f39e1a8e483b
f4ebd2324a649132ca9f44c08c42e1a4f48deacd
/tests/test_host.py
9b4dd8b2ad63756ca3158a94ae770f512a099221
[ "MIT" ]
permissive
https://github.com/jtdub/hier_config
85e2c4ff48edf6c1a4cc339441b96cf169f0dae7
cdc0f62ec207126d197bf4c38d7befd7e1a215a4
refs/heads/master
2021-06-03T15:07:25.641573
2021-04-21T15:26:49
2021-04-21T15:26:49
134,483,698
0
0
null
true
2018-05-22T22:44:02
2018-05-22T22:44:02
2018-05-22T20:09:05
2018-05-22T19:57:48
2,701
0
0
0
null
false
null
import pytest from hier_config.host import Host class TestHost: @pytest.fixture(autouse=True) def setup(self, options_ios): self.host = Host("example.rtr", "ios", options_ios) def test_load_config_from(self, running_config, generated_config): self.host.load_running_config(running_config) self.host.load_generated_config(generated_config) assert len(self.host.generated_config) > 0 assert len(self.host.running_config) > 0 def test_load_remediation(self, running_config, generated_config): self.host.load_running_config(running_config) self.host.load_generated_config(generated_config) self.host.remediation_config() assert len(self.host.remediation_config().children) > 0 def test_load_tags(self, tags_ios): self.host.load_tags(tags_ios) assert len(self.host.hconfig_tags) > 0 def test_filter_remediation(self, running_config, generated_config, tags_ios): self.host.load_running_config(running_config) self.host.load_generated_config(generated_config) self.host.load_tags(tags_ios) rem1 = self.host.remediation_config_filtered_text(set(), set()) rem2 = self.host.remediation_config_filtered_text({"safe"}, set()) assert rem1 != rem2
UTF-8
Python
false
false
1,305
py
4
test_host.py
2
0.678927
0.672797
0
37
34.27027
82
nickhester/basic_io_python
6,373,731,508,896
5d852093236ca7b12089a5f603db5ea806f429f6
224fc1e9192adeb1a3105e09845196790e42ce02
/CallApiAndLogPracticeProject.py
1ff1d797eea769df997298f4e33f167ef184078f
[]
no_license
https://github.com/nickhester/basic_io_python
6eb2f20b33518ed6edbca162813b151c3b9de589
0253124f0d6c10f1c5a8e39f3a661945af92f9a5
refs/heads/master
2022-04-22T08:04:30.154058
2020-04-21T02:32:32
2020-04-21T02:32:32
257,459,511
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys

import requests

# read from file
reader = open(sys.argv[1])
inputText = reader.read()
reader.close()

# call API; pass the header via headers= (a bare dict in the second
# positional slot of requests.get would be treated as query params)
response = requests.get("http://numbersapi.com/" + inputText,
                        headers={'Content-Type': 'application/json'})
if response.status_code != 200:
    raise Exception("API failed to return a successful response")
responseText = response.content.decode('utf-8')

# write to file
writer = open(sys.argv[2], "w")
writer.write(responseText)
writer.close()
UTF-8
Python
false
false
464
py
1
CallApiAndLogPracticeProject.py
1
0.721983
0.709052
0
18
24.777778
101
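An equivalent sketch using context managers, a timeout, and raise_for_status, so the file handles are closed even on error paths and HTTP failures raise with details (same numbersapi.com endpoint; nothing else assumed):

import sys
import requests

# read input with a context manager so the handle is closed on errors too
with open(sys.argv[1]) as reader:
    input_text = reader.read().strip()

response = requests.get("http://numbersapi.com/" + input_text, timeout=10)
response.raise_for_status()  # raises requests.HTTPError on non-2xx status codes

with open(sys.argv[2], "w") as writer:
    writer.write(response.text)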
dftidft/RecMe
11,416,023,116,746
ed31549e6836d922b939deac5f8e7ac21d1888b6
84c5983bdc93732a17a65f28e2db3668cb9a9457
/getCorners.py
33593c0a5dd7732cc30cb5eb7b87ad1113de8fb1
[]
no_license
https://github.com/dftidft/RecMe
47284524ba025ffcef2b82d490352e4b772e1d3d
48d83c28d151818fba1c71a7d8a116eebd64cbf0
refs/heads/master
2020-04-04T19:26:22.559260
2015-03-18T03:41:26
2015-03-18T03:41:26
32,023,678
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding=gbk
import cv2

img = cv2.imread('g:/dataset/gochessboard/test1/00001.jpg')
size = img.shape
print size

gray = cv2.cvtColor(img, cv2.cv.CV_RGB2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 500, 0.05, 10)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)

for i in range(corners.shape[0]):
    # print corners[i][0, 0]
    pt = (corners[i][0, 0], corners[i][0, 1])
    cv2.circle(img, pt, 2, cv2.cv.RGB(0, 0, 255), 2)

cv2.imshow('', img)
cv2.waitKey()
UTF-8
Python
false
false
549
py
9
getCorners.py
9
0.661202
0.566485
0
20
26.45
75
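The script above targets Python 2 and OpenCV 2.x (the cv2.cv namespace was removed in OpenCV 3). A sketch of the same corner-detection pipeline against the modern API, with an illustrative image path; colors are plain BGR tuples, and since imread returns BGR data the grayscale conversion uses COLOR_BGR2GRAY:

import cv2

img = cv2.imread('board.jpg')  # illustrative path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

corners = cv2.goodFeaturesToTrack(gray, maxCorners=500, qualityLevel=0.05, minDistance=10)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)

for c in corners:
    x, y = c.ravel()
    cv2.circle(img, (int(x), int(y)), 2, (255, 0, 0), 2)  # BGR tuple: blue dots

cv2.imshow('corners', img)
cv2.waitKey()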
rougier/gl-agg
18,236,431,157,639
5447b3bd47e90aebfb8d4246694a39d646971b71
efbe29a32d533d992d082ebcacb714731d9885d4
/demos/demo-graph.py
3991b9edc9f604b02fbdaa9627148ab38b1b3bd3
[]
no_license
https://github.com/rougier/gl-agg
d11cb793712fb150cd8146b6d4d3da1dbbdf2e65
f2f8297afcd63e8e396ba7d710e257e14c7fd25e
refs/heads/master
2023-08-30T15:00:53.589140
2022-08-31T08:15:44
2022-08-31T08:15:44
8,599,292
54
9
null
false
2022-08-31T08:15:45
2013-03-06T08:39:27
2022-06-29T05:10:25
2022-08-31T08:15:44
2,974
82
16
2
Python
false
false
#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C) 2013 Nicolas P. Rougier. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import sys import numpy as np import OpenGL.GL as gl import OpenGL.GLUT as glut from scipy.spatial import cKDTree from scipy.spatial.distance import cdist def graph(links = [(0,1), (1,2), (2,3), (3,0), (0,2), (1,3), (3,4), (4,5), (5,6), (6,7), (7,8), (8,9), (9,10), (10,7), (8,10), (7,9) ]): ntype = np.dtype( [('position', 'f4', 2), ('previous', 'f4', 2), ('weight', 'f4', 1), ('charge', 'f4', 1), ('fixed', 'b', 1)] ) ltype = np.dtype( [('source', 'i4', 1), ('target', 'i4', 1), ('strength', 'f4', 1), ('distance', 'f4', 1)] ) L = np.array(links).reshape(len(links),2) L -= L.min() n = L.max()+1 nodes = np.zeros(n, ntype) nodes['position'] = np.random.uniform(256-32, 256+32, (n,2)) nodes['previous'] = nodes['position'] nodes['fixed'] = False nodes['weight'] = 1 nodes['charge'] = 1 l = len(L) links = np.zeros( n+l, ltype) links[:n]['source'] = np.arange(0,n) links[:n]['target'] = np.arange(0,n) links[n:]['source'] = L[:,0] links[n:]['target'] = L[:,1] links['distance'] = 25 links['strength'] = 5 I = np.argwhere(links['source']==links['target']) links['distance'][I] = links['strength'][I] = 0 return nodes,links # ----------------------------------------------------------------------------- def relaxation(nodes, links): """ Gauss-Seidel relaxation for links """ sources_idx = links['source'] targets_idx = links['target'] sources = nodes[sources_idx] targets = nodes[targets_idx] distances = links['distance'] strengths = links['strength'] D = (targets['position'] - sources['position']) L = np.sqrt((D*D).sum(axis=1)) # This avoid to test L != 0 (I = np.where(L>0)) L = np.where(L,L,np.NaN) L = strengths * (L-distances) /L # Replace nan by 0, i.e. 
where L was 0 L = np.nan_to_num(L) D *= L.reshape(len(L),1) K = sources['weight'] / (sources['weight'] + targets['weight']) K = K.reshape(len(K),1) # Note that a direct nodes['position'][links['source']] += K*D*(1-F) # would't work as expected because of repeated indices F = nodes['fixed'][sources_idx].reshape(len(links),1) W = K*D*(1-F) * 0.1 nodes['position'][:,0] += np.bincount(sources_idx, W[:,0], minlength=len(nodes)) nodes['position'][:,1] += np.bincount(sources_idx, W[:,1], minlength=len(nodes)) F = nodes['fixed'][targets_idx].reshape(len(links),1) W = (1-K)*D*(1-F) * 0.1 nodes['position'][:,0] -= np.bincount(targets_idx, W[:,0], minlength=len(nodes)) nodes['position'][:,1] -= np.bincount(targets_idx, W[:,1], minlength=len(nodes)) # ----------------------------------------------------------------------------- def repulsion(nodes, links): P = nodes['position'] n = len(P) X,Y = P[:,0],P[:,1] dX,dY = np.subtract.outer(X,X), np.subtract.outer(Y,Y) dist = cdist(P,P) dist = np.where(dist, dist, 1) D = np.empty((n,n,2)) D[...,0] = dX/dist D[...,1] = dY/dist D = np.nan_to_num(D) R = D.sum(axis=1) L = np.sqrt(((R*R).sum(axis=0))) R /= L F = nodes['fixed'].reshape(len(nodes),1) P += 5*R*(1-F) # ----------------------------------------------------------------------------- def attraction(nodes, links): P = nodes['position'] F = nodes['fixed'].reshape(len(nodes),1) P += 0.01*((256,256) - P) * (1-F) # ----------------------------------------------------------------------------- def integration(nodes, links): P = nodes['position'].copy() F = nodes['fixed'].reshape(len(nodes),1) nodes['position'] -= ((nodes['previous']-P)*.9) * (1-F) nodes['previous'] = P # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1); gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) lines.draw() circles.draw() glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width, height) # ------------------------------------- def on_keyboard(key, x, y): if key == '\033': sys.exit() # ------------------------------------- def on_mouse(button, state, x, y): global drag, index drag = False nodes['fixed'] = False nodes['weight'] = 1 if state == 0: _,_,w,h = gl.glGetIntegerv( gl.GL_VIEWPORT ) P = nodes['position'] - (x,h-y) D = np.sqrt((P**2).sum(axis=1)) index = np.argmin(D) if D[index] < 10: nodes['fixed'][index] = True nodes['weight'][index] = 0.01 drag = True # ------------------------------------- def on_motion(x, y): global drag, mouse, index if drag: _,_,w,h = gl.glGetIntegerv( gl.GL_VIEWPORT ) nodes['position'][index] = x,h-y P = nodes['position'] circles.vertices.data['a_center'] = np.repeat(P,4,axis=0) circles._vbuffer._dirty = True src = nodes[links['source']]['position'] tgt = nodes[links['target']]['position'] src = np.repeat(src,4,axis=0) lines.vertices.data['a_p0'] = src tgt = np.repeat(tgt,4,axis=0) lines.vertices.data['a_p1'] = tgt lines._vbuffer._dirty = True glut.glutPostRedisplay() # ------------------------------------- def on_timer(fps): relaxation(nodes,links) repulsion(nodes,links) attraction(nodes,links) integration(nodes,links) # Update collection P = nodes['position'] circles.vertices.data['a_center'] = np.repeat(P,4,axis=0) circles._vbuffer._dirty = True src = nodes[links['source']]['position'] tgt = nodes[links['target']]['position'] src = np.repeat(src,4,axis=0) lines.vertices.data['a_p0'] = src tgt = np.repeat(tgt,4,axis=0) lines.vertices.data['a_p1'] = tgt lines._vbuffer._dirty = True 
glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ----------------------------------------------------------------------------- if __name__ == '__main__': from glagg import LineCollection, CircleCollection glut.glutInit(sys.argv) glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(512, 512) glut.glutCreateWindow("Dynamic graph") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) glut.glutMouseFunc(on_mouse) glut.glutMotionFunc(on_motion) nodes,links = graph( ) circles = CircleCollection() lines = LineCollection() for node in nodes: position = node['position'] circles.append(center = position, radius=5, linewidth=2, fg_color=(1,1,1,1), bg_color=(1,.5,.5,1)) src = nodes[links['source']]['position'] tgt = nodes[links['target']]['position'] V = np.array(zip(src,tgt)).reshape(2*len(src),2) lines.append(V, linewidth=1.5, color=(0.75,0.75,0.75,1.00)) drag,index = False, -1 fps = 60 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutMainLoop()
UTF-8
Python
false
false
8,989
py
37
demo-graph.py
29
0.548226
0.524975
0
260
33.573077
84
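The comment inside relaxation() about repeated indices is worth demonstrating: with NumPy fancy-index +=, a duplicated index receives only one of its updates, while np.bincount accumulates every contribution, which is why the code above routes the weighted updates through bincount. A minimal standalone sketch:

import numpy as np

pos = np.zeros(4)
idx = np.array([0, 0, 1, 2])   # index 0 repeats
w = np.array([1.0, 2.0, 3.0, 4.0])

pos[idx] += w                  # fancy-index +=: index 0 gets only ONE update
print(pos)                     # [2. 3. 4. 0.]

pos = np.zeros(4)
pos += np.bincount(idx, weights=w, minlength=len(pos))  # accumulates all of them
print(pos)                     # [3. 3. 4. 0.]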
GennadiiTurutin/nadiakochstore.com
11,587,821,806,810
c71c51c1ce98f45ebd072833258e67554dd6532e
7ec128cb017995d3bb875adcdaec86da7751cd2c
/store/accounts/models.py
68477a87bb573b4b6ad9c3a8c652ecdf27d026ee
[ "Apache-2.0" ]
permissive
https://github.com/GennadiiTurutin/nadiakochstore.com
edd12dc8579636176c8c4bdcd03beae5244b8082
4fad5cac3c9fffef9427ac787c6528fb2751dc6d
refs/heads/master
2020-04-26T01:50:03.182942
2019-03-03T03:57:51
2019-03-03T03:57:51
173,216,179
0
0
Apache-2.0
false
2020-02-11T23:48:34
2019-03-01T01:46:08
2019-03-03T03:57:05
2020-02-11T23:48:32
5,988
0
0
3
Tcl
false
false
from django.db import models


# Create your models here.
class Account(models.Model):
    title = models.CharField(max_length=120)
UTF-8
Python
false
false
127
py
15
models.py
11
0.779528
0.755906
0
5
24.6
41
thebe111/commander
3,461,743,652,632
161fe26cb0deaefb7963515bc4390a6b13705102
addccd666a95b4df1d33a3bb4995fc8be4b74185
/src/commander/core/exceptions.py
1bf79e4013bd1527b7fa64783867c56dc22d79f0
[ "MIT" ]
permissive
https://github.com/thebe111/commander
a1929b49b66473b778e96c398b816fbc2ab66f6f
b6ebd2ddfd1792b1db4012eb5917f478c48928d1
refs/heads/master
2023-07-02T16:14:15.732648
2021-07-05T21:14:08
2021-07-28T21:32:29
390,511,647
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys


class CustomException(Exception):
    def __init__(self, msg: str):
        # keep the message both on the exception and as a named attribute
        super().__init__(msg)
        self.message = msg


def exceptions_resolver(instance):
    # hasattr() safely checks whether the exception carries a .message
    # attribute (the `in` operator is not defined for exception objects)
    if hasattr(instance, "message"):
        return sys.exit(f"Commander: {instance.message}")
    return sys.exit(f"Commander: {instance}")
UTF-8
Python
false
false
315
py
11
exceptions.py
7
0.644444
0.644444
0
13
23.230769
57
Project-Franchise/client-service
7,885,559,986,115
36b86ac5e411031f7eb1260dbc16fec87f7af339
47fca5bce8ee0bef7de27e63241b7b1052bfae20
/service_api/utils/__init__.py
2becf011e4e87676fee72211dadcd49cd54a8553
[]
no_license
https://github.com/Project-Franchise/client-service
30bac295be2717aaa5ba9368cbf0021e130bf0b8
80cd627f47e0499ddcb2c3313dbd67f1a90145c9
refs/heads/main
2023-05-02T13:15:18.829177
2021-05-25T06:28:38
2021-05-25T06:28:38
346,783,231
1
0
null
false
2021-05-25T06:28:39
2021-03-11T17:28:07
2021-05-25T05:39:13
2021-05-25T06:28:38
523
1
0
28
Python
false
false
""" Utilities for creating models and saving them in DB """ import datetime import json from functools import singledispatch from typing import Dict from marshmallow import ValidationError from marshmallow.schema import Schema from requests import Session from sqlalchemy.orm import make_transient from sqlalchemy import func from service_api import LOGGER, Base, session_scope from ..exceptions import (MetaDataError, ModelNotFoundException, ObjectNotFoundException) from ..models import Realty, RealtyDetails from ..schemas import RealtyDetailsSchema, RealtySchema @singledispatch def load_data(model_schema: Schema, data: Dict, model: Base) -> Base: """ Stores data in a database according to a given scheme """ try: valid_data = model_schema.load(data) record = model(**valid_data) except ValidationError as error: LOGGER.error("Error message:%s, data for validation %s", error, valid_data) raise with session_scope() as session: existing_record = session.query(model).filter_by(**valid_data).first() if existing_record is None: session.add(record) return existing_record or record @load_data.register def _(model_schema: RealtyDetailsSchema, data: Dict, model: RealtyDetails): try: valid_data = model_schema.load(data) realty_details_record = model(**valid_data) except ValidationError as error: LOGGER.error(error) raise with session_scope() as session: realty_details = session.query(model).filter_by( original_url=realty_details_record.original_url, version=realty_details_record.version).first() if realty_details is not None: incoming_data = model_schema.dump(realty_details_record) db_data = model_schema.dump(realty_details) incoming_data.pop("id") db_data.pop("id") if incoming_data == db_data: return realty_details with session_scope() as session: session.expire_on_commit = False session.query(model).filter_by( original_url=realty_details.original_url, version=realty_details.version).update( {"version": datetime.datetime.now()}) session.add(realty_details_record) session.commit() realty_record = session.query(Realty).filter_by( realty_details_id=realty_details.id).first() new_realty_details_id = session.query(model).filter_by( original_url=realty_details_record.original_url, version=realty_details_record.version).first().id make_transient(realty_record) realty_record.realty_details_id = new_realty_details_id realty_record.id = None del realty_record.id session.add(realty_record) with session_scope() as session: session.query(Realty).filter_by( realty_details_id=model_schema.dump(realty_details).get("id")).update( {"version": datetime.datetime.now()}) with session_scope() as session: session.add(realty_details_record) return realty_details_record @load_data.register def _(model_schema: RealtySchema, data: Dict, model: Realty): try: valid_data = model_schema.load(data) realty_record = model(**valid_data) LOGGER.debug("record.realty_details_id: %s", str(realty_record.realty_details_id)) except ValidationError as error: LOGGER.error(error) raise with session_scope() as session: realty = session.query(model).filter_by(realty_details_id=realty_record.realty_details_id).first() if realty is None: with session_scope() as session: session.add(realty_record) return realty or realty_record def open_metadata(path: str) -> Dict: """ Open file with metadata and return content """ try: with open(path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) except json.JSONDecodeError as error: LOGGER.error(error) raise MetaDataError from error except FileNotFoundError as error: LOGGER.error("Invalid metadata 
path, or metadata.json file does not exist") raise MetaDataError from error return metadata def recognize_by_alias(model: Base, alias: str, set_=None): """ Finds model record by alias. If set param is passed that alias is searched in that set :param model: Base :param alias: str :param set_: optional :returns: model instance :raises: ModelNotFoundException, ObjectNotFoundException """ try: table_of_aliases = model.aliases.mapper.class_ except AttributeError as error: LOGGER.error(error) raise ModelNotFoundException(desc="Model {} doesn't have aliases attribute".format(model)) from error with session_scope() as session: set_ = set_ or session.query(model) obj = set_.join(table_of_aliases, table_of_aliases.entity_id == model.id).filter( func.lower(table_of_aliases.alias) == alias.lower()).first() if obj is None: raise ObjectNotFoundException(message="Record for alias: {} not found".format(alias)) return obj def send_request(method: str, url: str, request_session: Session = None, *args, **kwargs): """ Wrapper for sending requests """ request_session = request_session or Session() response = request_session.request(method, url, *args, **kwargs) from ..services.limitation import LimitationSystem LimitationSystem().mark_token_after_request(response.url) return response def chunkify(number, pieces): """ Devide number on almost equal pieces """ return [number//pieces]*(pieces-1) + [sum(divmod(number, pieces))]
UTF-8
Python
false
false
5,814
py
101
__init__.py
70
0.666495
0.666151
0
165
34.236364
114
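load_data above dispatches on the type annotation of its first argument via functools.singledispatch. A minimal standalone sketch of the same pattern (all names here are illustrative, not from the project):

from functools import singledispatch

@singledispatch
def describe(obj):
    # fallback for types without a registered implementation
    return "generic: " + repr(obj)

@describe.register
def _(obj: int):
    # since Python 3.7, register() can infer the dispatch type from the
    # annotation, just as load_data's registered implementations do
    return "int: " + str(obj)

@describe.register
def _(obj: list):
    return "list of {} items".format(len(obj))

print(describe("x"), describe(3), describe([1, 2]))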
mohammedjasam/Competitive-Programming
1,949,915,170,711
dbfcbe8e51c2bd7bef1307b614a4741a27783bdf
fa1277b6939fbc1de8795a2c6e7c253637992f9e
/Unsolved!/GameOfThrowns.py
bb3d4a4c60ddeb738961299bb753c67d52350f6c
[]
no_license
https://github.com/mohammedjasam/Competitive-Programming
bb83171535c5d8626a94c5e0d3b3d37e9e3f1de8
4a5ab3c12693b50eea139f0bb2040fd5676ed069
refs/heads/master
2021-01-19T20:22:59.026871
2018-04-27T14:56:24
2018-04-27T14:56:24
83,748,705
4
2
null
null
null
null
null
null
null
null
null
null
null
null
null
n, k = map(int, input().split())
actions = input()
actions = actions.replace("undo ", "*")
actions = actions.split()

childQueue = list(range(n))
revchildQueue = list(reversed(childQueue))

current = [0]
for a in actions:
    try:
        if int(a) > 0:
            current.append(childQueue[(current[-1] + int(a)) % n])
        elif int(a) < 0:
            current.append(revchildQueue[(current[-1] + int(a)) % n])
    except ValueError:
        # token is an undo marker of the form "*<count>"
        if a[0] == "*":
            steps = int(a[1:])  # a[1:] holds the undo count (may be more than one digit)
            if steps > 0:
                current = current[:-steps]

print(current[-1])
UTF-8
Python
false
false
1,120
py
97
GameOfThrowns.py
91
0.521429
0.508036
0
38
28.473684
102
n8sty/dowhy
11,974,368,861,048
c024d05f36ac0c62523572cabc0c6e88c0933592
8a564da3207f01e1827eac5396ab12068cd06663
/tests/test_causal_model.py
d2cc4ef035a059a891a4483854ca3d5e94f9436b
[ "MIT" ]
permissive
https://github.com/n8sty/dowhy
09478cbf0694da7f6d4d6ce4f67baaa5de06b964
0b5e2c3efa348ca232ecc6355f0fc6ec4458241a
refs/heads/main
2023-06-10T00:10:03.085219
2023-05-27T01:46:08
2023-05-27T01:46:08
333,647,965
0
0
MIT
true
2021-01-28T04:44:40
2021-01-28T04:44:40
2021-01-27T22:23:42
2021-01-27T21:57:02
20,540
0
0
0
null
false
false
import pandas as pd import pytest from flaky import flaky from pytest import mark from sklearn import linear_model import dowhy import dowhy.datasets from dowhy import CausalModel class TestCausalModel(object): @mark.parametrize( ["beta", "num_samples", "num_treatments"], [ (10, 100, 1), ], ) def test_external_estimator(self, beta, num_samples, num_treatments): num_common_causes = 5 data = dowhy.datasets.linear_dataset( beta=beta, num_common_causes=num_common_causes, num_samples=num_samples, num_treatments=num_treatments, treatment_is_binary=True, ) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], proceed_when_unidentifiable=True, test_significance=None, ) identified_estimand = model.identify_effect(proceed_when_unidentifiable=True) estimate = model.estimate_effect( identified_estimand, method_name="backdoor.tests.causal_estimators.mock_external_estimator.PropensityScoreWeightingEstimator", control_value=0, treatment_value=1, target_units="ate", # condition used for CATE confidence_intervals=True, method_params={"propensity_score_model": linear_model.LogisticRegression(max_iter=1000)}, ) assert estimate.estimator.propensity_score_model.max_iter == 1000 @mark.parametrize( ["beta", "num_instruments", "num_samples", "num_treatments"], [ (10, 1, 100, 1), ], ) def test_graph_input(self, beta, num_instruments, num_samples, num_treatments): num_common_causes = 5 data = dowhy.datasets.linear_dataset( beta=beta, num_common_causes=num_common_causes, num_instruments=num_instruments, num_samples=num_samples, num_treatments=num_treatments, treatment_is_binary=True, ) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], proceed_when_unidentifiable=True, test_significance=None, ) # removing two common causes gml_str = 'graph[directed 1 node[ id "{0}" label "{0}"]node[ id "{1}" label "{1}"]node[ id "Unobserved Confounders" label "Unobserved Confounders"]edge[source "{0}" target "{1}"]edge[source "Unobserved Confounders" target "{0}"]edge[source "Unobserved Confounders" target "{1}"]node[ id "X0" label "X0"] edge[ source "X0" target "{0}"] node[ id "X1" label "X1"] edge[ source "X1" target "{0}"] node[ id "X2" label "X2"] edge[ source "X2" target "{0}"] edge[ source "X0" target "{1}"] edge[ source "X1" target "{1}"] edge[ source "X2" target "{1}"] node[ id "Z0" label "Z0"] edge[ source "Z0" target "{0}"]]'.format( data["treatment_name"][0], data["outcome_name"] ) print(gml_str) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=gml_str, proceed_when_unidentifiable=True, test_significance=None, missing_nodes_as_confounders=True, ) common_causes = model.get_common_causes() assert all(node_name in common_causes for node_name in ["X1", "X2"]) @mark.parametrize( ["beta", "num_instruments", "num_samples", "num_treatments"], [ (10, 1, 100, 1), ], ) def test_graph_input2(self, beta, num_instruments, num_samples, num_treatments): num_common_causes = 5 data = dowhy.datasets.linear_dataset( beta=beta, num_common_causes=num_common_causes, num_instruments=num_instruments, num_samples=num_samples, num_treatments=num_treatments, treatment_is_binary=True, ) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], proceed_when_unidentifiable=True, test_significance=None, ) # removing two common causes gml_str = """graph[ directed 1 node[ id "{0}" 
label "{0}" ] node [ id "{1}" label "{1}" ] node [ id "Unobserved Confounders" label "Unobserved Confounders" ] edge[ source "{0}" target "{1}" ] edge[ source "Unobserved Confounders" target "{0}" ] edge[ source "Unobserved Confounders" target "{1}" ] node[ id "X0" label "X0" ] edge[ source "X0" target "{0}" ] node[ id "X1" label "X1" ] edge[ source "X1" target "{0}" ] node[ id "X2" label "X2" ] edge[ source "X2" target "{0}" ] edge[ source "X0" target "{1}" ] edge[ source "X1" target "{1}" ] edge[ source "X2" target "{1}" ] node[ id "Z0" label "Z0" ] edge[ source "Z0" target "{0}" ]]""".format( data["treatment_name"][0], data["outcome_name"] ) print(gml_str) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=gml_str, proceed_when_unidentifiable=True, test_significance=None, missing_nodes_as_confounders=True, ) common_causes = model.get_common_causes() assert all(node_name in common_causes for node_name in ["X1", "X2"]) @mark.parametrize( ["beta", "num_instruments", "num_samples", "num_treatments"], [ (10, 1, 100, 1), ], ) def test_graph_input3(self, beta, num_instruments, num_samples, num_treatments): num_common_causes = 5 data = dowhy.datasets.linear_dataset( beta=beta, num_common_causes=num_common_causes, num_instruments=num_instruments, num_samples=num_samples, num_treatments=num_treatments, treatment_is_binary=True, ) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], proceed_when_unidentifiable=True, test_significance=None, ) # removing two common causes gml_str = """dag { "Unobserved Confounders" [pos="0.491,-1.056"] X0 [pos="-2.109,0.057"] X1 [adjusted, pos="-0.453,-1.562"] X2 [pos="-2.268,-1.210"] Z0 [pos="-1.918,-1.735"] v0 [latent, pos="-1.525,-1.293"] y [outcome, pos="-1.164,-0.116"] "Unobserved Confounders" -> v0 "Unobserved Confounders" -> y X0 -> v0 X0 -> y X1 -> v0 X1 -> y X2 -> v0 X2 -> y Z0 -> v0 v0 -> y } """ print(gml_str) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=gml_str, proceed_when_unidentifiable=True, test_significance=None, missing_nodes_as_confounders=True, ) common_causes = model.get_common_causes() assert all(node_name in common_causes for node_name in ["X1", "X2"]) all_nodes = model._graph.get_all_nodes(include_unobserved=True) assert all( node_name in all_nodes for node_name in ["Unobserved Confounders", "X0", "X1", "X2", "Z0", "v0", "y"] ) all_nodes = model._graph.get_all_nodes(include_unobserved=False) assert "Unobserved Confounders" not in all_nodes @mark.parametrize( ["beta", "num_instruments", "num_samples", "num_treatments"], [ (10, 1, 100, 1), ], ) def test_graph_input4(self, beta, num_instruments, num_samples, num_treatments): num_common_causes = 5 data = dowhy.datasets.linear_dataset( beta=beta, num_common_causes=num_common_causes, num_instruments=num_instruments, num_samples=num_samples, num_treatments=num_treatments, treatment_is_binary=True, ) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], proceed_when_unidentifiable=True, test_significance=None, ) # removing two common causes gml_str = "tests/sample_dag.txt" print(gml_str) model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=gml_str, proceed_when_unidentifiable=True, test_significance=None, missing_nodes_as_confounders=True, ) common_causes = model.get_common_causes() 
assert all(node_name in common_causes for node_name in ["X1", "X2"]) all_nodes = model._graph.get_all_nodes(include_unobserved=True) assert all( node_name in all_nodes for node_name in ["Unobserved Confounders", "X0", "X1", "X2", "Z0", "v0", "y"] ) all_nodes = model._graph.get_all_nodes(include_unobserved=False) assert "Unobserved Confounders" not in all_nodes @mark.parametrize( ["num_variables", "num_samples"], [ (5, 5000), ], ) @flaky(max_runs=3) def test_graph_refutation(self, num_variables, num_samples): data = dowhy.datasets.dataset_from_random_graph(num_vars=num_variables, num_samples=num_samples) df = data["df"] model = CausalModel( data=df, treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], ) graph_refutation_object = model.refute_graph( k=1, independence_test={ "test_for_continuous": "partial_correlation", "test_for_discrete": "conditional_mutual_information", }, ) assert graph_refutation_object.refutation_result == True @mark.parametrize( ["num_variables", "num_samples"], [ (10, 5000), ], ) def test_graph_refutation2(self, num_variables, num_samples): data = dowhy.datasets.dataset_from_random_graph(num_vars=num_variables, num_samples=num_samples) df = data["df"] gml_str = """ graph [ directed 1 node [ id 0 label "a" ] node [ id 1 label "b" ] node [ id 2 label "c" ] node [ id 3 label "d" ] node [ id 4 label "e" ] node [ id 5 label "f" ] node [ id 6 label "g" ] node [ id 7 label "h" ] node [ id 8 label "i" ] node [ id 9 label "j" ] edge [ source 0 target 1 ] edge [ source 0 target 3 ] edge [ source 3 target 2 ] edge [ source 7 target 4 ] edge [ source 6 target 5 ] edge [ source 7 target 8 ] edge [ source 9 target 2 ] edge [ source 9 target 8 ] ] """ model = CausalModel( data=df, treatment=data["treatment_name"], outcome=data["outcome_name"], graph=gml_str, ) graph_refutation_object = model.refute_graph( k=2, independence_test={ "test_for_continuous": "partial_correlation", "test_for_discrete": "conditional_mutual_information", }, ) assert graph_refutation_object.refutation_result == False def test_unobserved_graph_variables_log_warning(self, caplog): data = dowhy.datasets.linear_dataset( beta=10, num_common_causes=3, num_instruments=1, num_effect_modifiers=1, num_samples=3, treatment_is_binary=True, stddev_treatment_noise=2, num_discrete_common_causes=1, ) df = data["df"] # Remove graph variable with name "W0" from observed data. df = df.drop(columns=["W0"]) expected_warning_message_regex = ( "1 variables are assumed unobserved because they are not in the " "dataset. Configure the logging level to `logging.WARNING` or " "higher for additional details." ) with pytest.warns( UserWarning, match=expected_warning_message_regex, ): _ = CausalModel( data=df, treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], ) # Ensure that a log record exists that provides a more detailed view # of observed and unobserved graph variables (counts and variable names.) expected_logging_message = ( "The graph defines 7 variables. 6 were found in the dataset " "and will be analyzed as observed variables. 1 were not found " "in the dataset and will be analyzed as unobserved variables. " "The observed variables are: '['W1', 'W2', 'X0', 'Z0', 'v0', 'y']'. " "The unobserved variables are: '['W0']'. " "If this matches your expectations for observations, please continue. " "If you expected any of the unobserved variables to be in the " "dataframe, please check for typos." 
) assert any( log_record for log_record in caplog.records if ( (log_record.name == "dowhy.causal_model") and (log_record.levelname == "WARNING") and (log_record.message == expected_logging_message) ) ), ( "Expected logging message informing about unobserved graph variables " "was not found. Expected a logging message to be emitted in module `dowhy.causal_model` " f"and with level `logging.WARNING` and this content '{expected_logging_message}'. " f"Only the following log records were emitted instead: '{caplog.records}'." ) if __name__ == "__main__": pytest.main([__file__])
UTF-8
Python
false
false
15,723
py
289
test_causal_model.py
184
0.511989
0.494817
0
502
30.320717
623
carlcarl/lazyhub
11,123,965,316,962
f5dc21479acc08ddec70480bfe07936e70857cba
9e278eeb8a05ec25a657b11a9bf7b6f7fa6ca7b3
/lazyhub/views.py
a81cf2d3afd6ade2de3aa60b091f3d67a1d68bb7
[ "MIT" ]
permissive
https://github.com/carlcarl/lazyhub
7b3aef3fa267b863a23367bc4a79b909edfabdf9
b98c86f2845d0b3b07d97885cead9506e9d71453
refs/heads/master
2023-07-12T18:06:46.400114
2015-09-01T03:52:45
2015-09-01T03:52:45
15,317,402
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render
from django.views.decorators.http import require_http_methods
import json
from django.http import HttpResponse
from libs import github_query, date_filter
from forms import QueryForm


def index(request):
    return render(request, 'lazyhub/index.html')


@require_http_methods(['GET'])
def query(request, account, days):
    data = {'error': {'account': False, 'days': False}}
    if request.is_ajax():
        form = QueryForm({'account': account, 'days': days})
        if form.is_valid():
            account = form.cleaned_data['account']
            days = form.cleaned_data['days']
            data['data'] = github_query(account)
            data['data'] = date_filter(data['data'], days)
        else:
            for k, v in data['error'].iteritems():
                if k in form.errors:
                    data['error'][k] = True
    data = json.dumps(data)
    return HttpResponse(data, content_type='application/json')
UTF-8
Python
false
false
973
py
13
views.py
6
0.622816
0.622816
0
28
33.75
62
ypolaris-com/module_publish_test
3,770,981,303,813
98bb697f37a122a69284c0d2b1d779423c03a327
16a1aa1fdc154ae166a54b491884d878ac01e22e
/Print_module/__init__.py
b745968536a0857facbfcf3d95216ff17bce6afb
[]
no_license
https://github.com/ypolaris-com/module_publish_test
e3ebde2b0a09c05522f72aaae9f4b0358bc6d211
6ee3cc95236733719fe277718df3e6808400c247
refs/heads/main
2023-08-31T18:39:20.685596
2021-11-10T13:30:11
2021-11-10T13:30:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from Print_module.Star_print import *
UTF-8
Python
false
false
37
py
4
__init__.py
3
0.810811
0.810811
0
1
37
37
Rafaelbarr/100DaysOfCodeChallenge
15,358,803,059,082
de6a728a823c08dc660dac70e2e79c78fcc3a46d
c2c89df45a6640498bf9d292ef8c054e60633b52
/day006/001_100_years_old.py
2c7283d12c23dfe96a4dd1b22d9cf2c301a03e30
[]
no_license
https://github.com/Rafaelbarr/100DaysOfCodeChallenge
8841f70fd03a64f1a6c53f302b830baa16821d81
db75fd45dda2485fd52cbcbff4473d66514cc578
refs/heads/master
2022-01-07T10:23:34.474085
2018-04-27T05:36:22
2018-04-27T05:36:22
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

# This program asks the user for his/her name and age
# Then returns the year he/she will be 100 years old


def run():
    # Data input
    name = raw_input('What\'s your name?: ')
    age = int(raw_input('What\'s your age?: '))
    # Result calculation
    older = (2018 - age) + 100
    # Result output
    print('You\'ll be 100 years old in the {} year!'.format(older))


if __name__ == '__main__':
    run()
UTF-8
Python
false
false
447
py
57
001_100_years_old.py
55
0.57047
0.53915
0
18
22.944444
67
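A hedged Python 3 variant of the same program, using input() instead of raw_input() and taking the current year from datetime instead of the hardcoded 2018:

from datetime import date


def run():
    name = input("What's your name?: ")
    age = int(input("What's your age?: "))
    older = (date.today().year - age) + 100  # current year rather than 2018
    print("You'll be 100 years old in the {} year!".format(older))


if __name__ == '__main__':
    run()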
mindspore-ai/mindspore
5,042,291,634,900
de7e93515c94f00cb06aa1ab51330e8ffdb5bde8
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
/tests/st/hcom/hcom_sparsetensor.py
1bce2fa712b889e157fad1c7d4cf808cbaac9c2a
[ "Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense" ]
permissive
https://github.com/mindspore-ai/mindspore
ca7d5bb51a3451c2705ff2e583a740589d80393b
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
refs/heads/master
2023-07-29T09:17:11.051569
2023-07-17T13:14:15
2023-07-17T13:14:15
239,714,835
4,178
768
Apache-2.0
false
2023-07-26T22:31:11
2020-02-11T08:43:48
2023-07-26T10:48:16
2023-07-26T22:31:11
769,725
3,608
645
143
C++
false
false
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import os import numpy as np from mindspore.communication.management import get_rank from mindspore import Tensor from mindspore import Parameter from mindspore import context from mindspore.ops import operations as P import mindspore.nn as nn from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.communication.management import init from mindspore.communication.management import get_group_size class FakeDataInitMode: RandomInit = 0 OnesInit = 1 UniqueInit = 2 ZerosInit = 3 class FakeData: def __init__(self, size=1024, batch_size=32, image_size=(3, 224, 224), num_class=10, random_offset=0, use_parallel=False, fakedata_mode=FakeDataInitMode.RandomInit): self.size = size self.rank_batch_size = batch_size self.total_batch_size = self.rank_batch_size self.random_offset = random_offset self.image_size = image_size self.num_class = num_class self.rank_size = 1 self.rank_id = 0 self.batch_index = 0 self.image_data_type = np.float32 self.label_data_type = np.float32 self.is_onehot = True self.fakedata_mode = fakedata_mode if use_parallel: if 'CONTEXT_DEVICE_TARGET' in os.environ and os.environ['CONTEXT_DEVICE_TARGET'] == 'GPU': init(backend_name='nccl') else: init(backend_name='hccl') self.rank_size = get_group_size() self.rank_id = get_rank() self.total_batch_size = self.rank_batch_size * self.rank_size assert self.size % self.total_batch_size == 0 self.total_batch_data_size = (self.rank_size, self.rank_batch_size) + image_size def get_dataset_size(self): return int(self.size / self.total_batch_size) def get_reeat_count(self): return 1 def set_image_data_type(self, data_type): self.image_data_type = data_type def set_label_data_type(self, data_type): self.label_data_type = data_type def set_label_onehot(self, is_onehot=True): self.is_onehot = is_onehot def create_tuple_iterator(self, num_epochs=-1, do_copy=False): return self def __getitem__(self, batch_index): if batch_index * self.total_batch_size >= len(self): raise IndexError("{} index out of range".format(self.__class__.__name__)) rng_state = np.random.get_state() np.random.seed(batch_index + self.random_offset) if self.fakedata_mode == FakeDataInitMode.OnesInit: img = np.ones(self.total_batch_data_size) elif self.fakedata_mode == FakeDataInitMode.ZerosInit: img = np.zeros(self.total_batch_data_size) elif self.fakedata_mode == FakeDataInitMode.UniqueInit: total_size = 1 for i in self.total_batch_data_size: total_size = total_size* i img = np.reshape(np.arange(total_size)*0.0001, self.total_batch_data_size) else: img = np.random.randn(*self.total_batch_data_size) target = np.random.randint(0, self.num_class, size=(self.rank_size, self.rank_batch_size)) np.random.set_state(rng_state) img = img[self.rank_id] target = target[self.rank_id] img_ret = img.astype(self.image_data_type) target_ret = target.astype(self.label_data_type) if self.is_onehot: 
target_onehot = np.zeros(shape=(self.rank_batch_size, self.num_class)) target_onehot[np.arange(self.rank_batch_size), target] = 1 target_ret = target_onehot.astype(self.label_data_type) return Tensor(img_ret), Tensor(target_ret) def __len__(self): return self.size def __iter__(self): self.batch_index = 0 return self def reset(self): self.batch_index = 0 def __next__(self): if self.batch_index * self.total_batch_size < len(self): data = self[self.batch_index] self.batch_index += 1 return data raise StopIteration class NetWithSparseGatherV2(nn.Cell): def __init__(self, strategy=None, sparse=True): super(NetWithSparseGatherV2, self).__init__() self.axis = 0 self.sparse = sparse if sparse: self.weight = Parameter(Tensor(np.ones([8, 8]).astype(np.float32)), name="weight") self.gather = P.SparseGatherV2() else: self.weight = Parameter(Tensor(np.ones([8, 8]).astype(np.float32)), name="weight") self.gather = P.Gather() if strategy is not None: self.gather.shard(strategy) def construct(self, indices): x = self.gather(self.weight, indices, self.axis) return x def train_mindspore_impl(self, indices, epoch, batch_size, use_parallel=True): ds = FakeData(size=8, batch_size=batch_size, num_class=8, image_size=(), use_parallel=use_parallel) ds.set_image_data_type(np.int32) net = self net.set_train() loss = nn.SoftmaxCrossEntropyWithLogits() optimizer = nn.Adam(net.trainable_params()) optimizer.target = "CPU" model = Model(net, loss, optimizer) for _ in range(epoch): model.train(1, ds, dataset_sink_mode=False) output = net(indices) return output def test_allreduce_sparsegatherv2_adam_auto_parallel(): context.set_context(mode=context.GRAPH_MODE, device_target='Ascend') init(backend_name='hccl') context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, device_num=8, gradients_mean=True) indices = Tensor(np.array([0, 1, 2, 3, 4, 5, 6, 7]).astype(np.int32)) epoch = 3 batch_size = 1 net = NetWithSparseGatherV2(sparse=True) output_sparse = net.train_mindspore_impl(indices, epoch, batch_size) net = NetWithSparseGatherV2(sparse=False) output = net.train_mindspore_impl(indices, epoch, batch_size) assert np.allclose(output.asnumpy(), output_sparse.asnumpy(), 0.001, 0.001)
UTF-8
Python
false
false
6,736
py
15,926
hcom_sparsetensor.py
11,911
0.636283
0.622922
0
172
38.162791
114
huanhuan077/python_1
1,400,159,371,452
96510c29811c2fe4dabc719ce1e8136182676e34
1da84e2c2818542f21e045be620df7c13f878225
/test_items/mianshi/socket_server.py
b9dff90514a2b3f8833b155e311a1e1a63176323
[]
no_license
https://github.com/huanhuan077/python_1
3ea0f1978a3c9f67e86e036fcfaf06dbbc7d8730
f3d6371e95e05b4c5e306bdaf04ef1535dbbbe2e
refs/heads/master
2020-04-03T11:27:26.640285
2020-04-01T03:08:37
2020-04-01T03:08:37
155,222,218
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding:utf-8
import socket

# server side
s = socket.socket()
s.bind(('127.0.0.1', 6666))
s.listen(5)

while True:
    c, addr = s.accept()
    print('connection address:', addr)
    c.send(b'welcome')  # socket.send needs bytes, not str, on Python 3
    c.close()
UTF-8
Python
false
false
205
py
114
socket_server.py
74
0.591623
0.528796
0
13
13.769231
27
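The script above is only the server half; a minimal matching client sketch (same host and port as the server):

# coding:utf-8
import socket

# client side: connect to the server above and read its greeting
c = socket.socket()
c.connect(('127.0.0.1', 6666))
data = c.recv(1024)
print(data.decode('utf-8'))  # -> welcome
c.close()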