repo_name | __id__ | blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_url | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_fork | gha_event_created_at | gha_created_at | gha_updated_at | gha_pushed_at | gha_size | gha_stargazers_count | gha_forks_count | gha_open_issues_count | gha_language | gha_archived | gha_disabled | content | src_encoding | language | is_vendor | is_generated | length_bytes | extension | num_repo_files | filename | num_lang_files | alphanum_fraction | alpha_fraction | hex_fraction | num_lines | avg_line_length | max_line_length
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jueal/python_challenge | 395,136,995,134 | 2ff8fd466a766c3207bde8988d44ad6b26331ef4 | c66fc17e679d0aa1579865f09ee8fb4a05da03fb | /challenge_0/challenge_0.py | 2a19320e1e1529408d1dedf49be56054b61b8a46 | [] | no_license | https://github.com/jueal/python_challenge | f7249d4eea01a68ace33689d0160bd8ee9995312 | f6a70ce3e32d21776fc2137ca9cfda851203a557 | refs/heads/master | 2022-04-25T14:50:04.388370 | 2020-05-09T16:08:49 | 2020-05-09T16:08:49 | 261,938,292 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # compute 2^38, and change the url.
print(2 ** 38) | UTF-8 | Python | false | false | 51 | py | 14 | challenge_0.py | 13 | 0.647059 | 0.529412 | 0 | 3 | 16.333333 | 35 |
collective/collective.imagetags | 6,657,199,328,731 | b6a262f705764b599fbc304b2a0b76df273f1a2e | 7e5eb06a2347906adf63f95bf9c1fe79240d059b | /collective/imagetags/setuphandlers.py | d0fd68fbc0e8b5f4f636dae0d10c5c5be22e0186 | [] | no_license | https://github.com/collective/collective.imagetags | 59f4839851ec0b6c203df97e0b197c13d1d81691 | c758da883fb75e4a9098fda00be5919a23f1864b | refs/heads/master | 2023-03-22T14:39:43.802855 | 2013-05-28T10:54:36 | 2013-05-28T10:54:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from zope.site.hooks import getSite
from Products.CMFCore.utils import getToolByName
from StringIO import StringIO
import transaction
def updateKupu(context):
# Ordinarily, GenericSetup handlers check for the existence of XML files.
# Here, we are not parsing an XML file, but we use this text file as a
# flag to check that we actually meant for this import step to be run.
# The file is found in profiles/default.
if context.readDataFile('collective.imagetags_kupu.txt') is None:
return
# Add additional setup code here
out = StringIO()
portal = getSite()
# Get kupu tool and update its paragraph_styles property
kt = getToolByName(portal, 'kupu_library_tool', None)
if kt:
new_style = 'Show tags|img|imagetags-show'
styles = kt.getParagraphStyles()
if not new_style in styles:
styles.append(new_style)
kt.configure_kupu(parastyles=styles)
transaction.savepoint()
print >> out, "Updated paragraph_styles in kupu: %s" % new_style
else:
print >> out, "kupu already has %s in paragraph_styles" % new_style
context.getLogger("collective.imagetags").info(out.getvalue())
return out.getvalue()
| UTF-8 | Python | false | false | 1,254 | py | 51 | setuphandlers.py | 19 | 0.673844 | 0.673844 | 0 | 35 | 34.8 | 79 |
RedgeCastelino/Master_thesis_shared | 4,982,162,108,181 | 4542afbce276fa8e614a2cf903c2d7463414fb80 | 5425b9dbb15da20faaca1cfd98cebef8a5423216 | /src/sensor_model/scripts/ego_reader.py | c7eacca656af2790e1d8d0380531cf5a9ce1c599 | [] | no_license | https://github.com/RedgeCastelino/Master_thesis_shared | ce30be3906f6968859c93e508cbe4ace56de0237 | de2f4b229f3df4f219a08f3d4d7e8d3d40750c55 | refs/heads/main | 2023-03-12T12:32:36.555096 | 2021-03-01T14:34:57 | 2021-03-01T14:34:57 | 343,441,415 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import rospy
import math
from osi3_bridge.msg import GroundTruthMovingObjects, TrafficUpdateMovingObject
from rotate import rotate
def vehicle_reader():
# Node initialization
rospy.init_node('ego_reader', anonymous=False) # Start node
rospy.Subscriber("/osi3_moving_obj", GroundTruthMovingObjects, callback)
rospy.spin() # spin() simply keeps python from exiting until this node is stopped
def callback(osi_objs):
ego_data = find_ego(osi_objs)
header = osi_objs.header
public_ego(ego_data,header)
def find_ego(osi_objs):
global ego_dataCOPY
# find the smaller id number inside the list
ID = osi_objs.objects[0].id
IDpos = 0
for i in range(len(osi_objs.objects)):
if osi_objs.objects[i].id < ID: # take into account that the EGO is the first spawn Object
ID = osi_objs.objects[i].id
IDpos = i
# Assign the object with smaller ID to EGO
ego = osi_objs.objects[IDpos]
# Assign all other ID's to the obj_list
#osi_objs_noego = [x for x in osi_objs.objects if not x.id == ID]
return ego
def public_ego(ego,header):
    global egoyaw  # ego orientation in the map frame (don't forget redge and maikol)
ego_data = TrafficUpdateMovingObject()
ego_data.header.stamp = header.stamp
ego_data.header.frame_id = "EGO"
#ego_data to have ego parameters in ego frame (example velocity of ego in x and y directions of ego / longitudinal and lateral velocity)
ego_data.object = ego
[ego_data.object.velocity.x, ego_data.object.velocity.y] = rotate(ego.velocity.x,ego.velocity.y,-ego.orientation.yaw)
[ego_data.object.acceleration.x, ego_data.object.acceleration.y] = rotate(ego.acceleration.x,ego.acceleration.y,-ego.orientation.yaw)
egoyaw= ego.orientation.yaw
pub = rospy.Publisher('ego_data', TrafficUpdateMovingObject, queue_size=10)
pub.publish(ego_data)
if __name__ == '__main__':
vehicle_reader()
| UTF-8 | Python | false | false | 1,964 | py | 278 | ego_reader.py | 122 | 0.698574 | 0.695519 | 0 | 56 | 34.071429 | 141 |
Pratiknarola/algopy | 8,469,675,533,030 | e54287477dc6737bd11eca04d31ee363b16fa3e2 | d67b50553717612c4db0577db268ac0b42750ad0 | /sorting/gnome_sort.py | 8db54de63ffba1449734f7b553b3e21787ea8b36 | [
"MIT"
] | permissive | https://github.com/Pratiknarola/algopy | 24436b12bdfc52e61fcd6cd706925ebb2ad9236f | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | refs/heads/master | 2021-07-17T05:55:37.839127 | 2019-10-15T03:57:04 | 2019-10-15T03:57:04 | 211,533,758 | 4 | 3 | MIT | false | 2020-10-07T08:35:55 | 2019-09-28T17:01:41 | 2020-01-15T15:51:40 | 2019-10-15T03:57:05 | 19 | 4 | 2 | 1 | Python | false | false | '''
Created by: Pratik Narola (https://github.com/Pratiknarola)
last modified: 14-10-2019
'''
'''
Gnome Sort, also called Stupid Sort, is based on the concept of a garden gnome sorting his flower pots.
A garden gnome sorts the flower pots by the following method:
he looks at the flower pot next to him and the previous one;
if they are in the right order he steps one pot forward, otherwise he swaps them and steps one pot backward.
If there is no previous pot (he is at the start of the pot line), he steps forward;
if there is no pot next to him (he is at the end of the pot line), he is done.
'''
# A function to sort the given list using Gnome sort
def gnomeSort( arr, n):
index = 0
while index < n:
if index == 0:
index = index + 1
if arr[index] >= arr[index - 1]:
index = index + 1
else:
arr[index], arr[index-1] = arr[index-1], arr[index]
index = index - 1
return arr
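# Illustrative usage (an added sketch, not part of the original module): sort a small
# list in place and print the result.
if __name__ == '__main__':
    sample = [34, 2, 10, -9]
    print(gnomeSort(sample, len(sample)))  # -> [-9, 2, 10, 34]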
| UTF-8 | Python | false | false | 982 | py | 16 | gnome_sort.py | 15 | 0.64664 | 0.630346 | 0 | 32 | 29.6875 | 109 |
alipay/alipay-sdk-python-all | 12,429,635,369,970 | fc0ba900947e158fcb6b6294df860d6f5f5d2b5d | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayOpenMiniInnerversionUploadstatusQueryResponse.py | aac3c75e9696c97d568043f172c16e7699a0c6ba | [
"Apache-2.0"
] | permissive | https://github.com/alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | false | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | 2023-04-20T12:00:51 | 2023-04-16T10:42:27 | 12,485 | 221 | 54 | 27 | Python | false | false | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniInnerversionUploadstatusQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniInnerversionUploadstatusQueryResponse, self).__init__()
self._build_info = None
self._build_package_url = None
self._build_status = None
self._log_url = None
self._need_rotation = None
self._new_build_package_url = None
self._new_result_url = None
self._result_url = None
self._version_created = None
@property
def build_info(self):
return self._build_info
@build_info.setter
def build_info(self, value):
self._build_info = value
@property
def build_package_url(self):
return self._build_package_url
@build_package_url.setter
def build_package_url(self, value):
self._build_package_url = value
@property
def build_status(self):
return self._build_status
@build_status.setter
def build_status(self, value):
self._build_status = value
@property
def log_url(self):
return self._log_url
@log_url.setter
def log_url(self, value):
self._log_url = value
@property
def need_rotation(self):
return self._need_rotation
@need_rotation.setter
def need_rotation(self, value):
self._need_rotation = value
@property
def new_build_package_url(self):
return self._new_build_package_url
@new_build_package_url.setter
def new_build_package_url(self, value):
self._new_build_package_url = value
@property
def new_result_url(self):
return self._new_result_url
@new_result_url.setter
def new_result_url(self, value):
self._new_result_url = value
@property
def result_url(self):
return self._result_url
@result_url.setter
def result_url(self, value):
self._result_url = value
@property
def version_created(self):
return self._version_created
@version_created.setter
def version_created(self, value):
self._version_created = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniInnerversionUploadstatusQueryResponse, self).parse_response_content(response_content)
if 'build_info' in response:
self.build_info = response['build_info']
if 'build_package_url' in response:
self.build_package_url = response['build_package_url']
if 'build_status' in response:
self.build_status = response['build_status']
if 'log_url' in response:
self.log_url = response['log_url']
if 'need_rotation' in response:
self.need_rotation = response['need_rotation']
if 'new_build_package_url' in response:
self.new_build_package_url = response['new_build_package_url']
if 'new_result_url' in response:
self.new_result_url = response['new_result_url']
if 'result_url' in response:
self.result_url = response['result_url']
if 'version_created' in response:
self.version_created = response['version_created']
| UTF-8 | Python | false | false | 3,311 | py | 9,331 | AlipayOpenMiniInnerversionUploadstatusQueryResponse.py | 9,330 | 0.632135 | 0.631833 | 0 | 105 | 30.533333 | 124 |
shuaiwang123/ICAMS | 11,355,893,570,225 | 2af44c6ece2c9bc3a234babe0501a685396fc46f | 20ddd21fc3dea03e4ed0003e64603423c4c54a4d | /icams/download_era5.py | 359b55c470805442867d311262e2dcc94a731dc0 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | https://github.com/shuaiwang123/ICAMS | 554d239e5af65263059961cc0ff17d12866469e3 | 0308bcf3bc512d24ad9df848d1020ea559afcda2 | refs/heads/main | 2023-04-04T17:48:17.217816 | 2021-04-24T13:55:47 | 2021-04-24T13:55:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
#################################################################
### This program is part of ICAMS v1.0 ###
### Copy Right (c): 2020, Yunmeng Cao ###
### Author: Yunmeng Cao ###
### Contact : ymcmrs@gmail.com ###
#################################################################
import os
import sys
import numpy as np
import argparse
from icams import _utils as ut
import glob
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def read_region(STR):
WEST = STR.split('/')[0]
EAST = STR.split('/')[1].split('/')[0]
SOUTH = STR.split(EAST+'/')[1].split('/')[0]
NORTH = STR.split(EAST+'/')[1].split('/')[1]
WEST =float(WEST)
SOUTH=float(SOUTH)
EAST=float(EAST)
NORTH=float(NORTH)
return WEST,SOUTH,EAST,NORTH
def get_meta_corner(meta):
    if 'Y_FIRST' in meta.keys():
        # grid size from the mintPy LENGTH/WIDTH attributes, needed to compute the far corner
        length = int(meta['LENGTH'])
        width = int(meta['WIDTH'])
        lat0 = float(meta['Y_FIRST'])
        lon0 = float(meta['X_FIRST'])
        lat_step = float(meta['Y_STEP'])
        lon_step = float(meta['X_STEP'])
        lat1 = lat0 + lat_step * (length - 1)
        lon1 = lon0 + lon_step * (width - 1)
NORTH = lat0
SOUTH = lat1
WEST = lon0
EAST = lon1
else:
lats = [float(meta['LAT_REF{}'.format(i)]) for i in [1,2,3,4]]
lons = [float(meta['LON_REF{}'.format(i)]) for i in [1,2,3,4]]
lat0 = np.max(lats[:])
lat1 = np.min(lats[:])
lon0 = np.min(lons[:])
lon1 = np.max(lons[:])
NORTH = lat0 + 0.1
SOUTH = lat1 + 0.1
WEST = lon0 + 0.1
EAST = lon1 + 0.1
return WEST,SOUTH,EAST,NORTH
def unitdate(DATE):
LE = len(str(int(DATE)))
DATE = str(int(DATE))
if LE==5:
DATE = '200' + DATE
if LE == 6:
YY = int(DATE[0:2])
if YY > 80:
DATE = '19' + DATE
else:
DATE = '20' + DATE
return DATE
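# Illustrative examples: unitdate('90101') -> '20090101', unitdate('180101') -> '20180101'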
def read_txt_line(txt):
# Open the file with read only permit
f = open(txt, "r")
# use readlines to read all lines in the file
# The variable "lines" is a list containing all lines in the file
lines = f.readlines()
lines0 = [line.strip() for line in lines]
f.close()
# remove empty lines
lines_out = []
for line in lines0:
if not len(line) ==0:
lines_out.append(line)
return lines_out
def get_lack_datelist(date_list, date_list_exist):
date_list0 = []
for k0 in date_list:
if k0 not in date_list_exist:
date_list0.append(k0)
return date_list0
def era5_time(research_time0):
research_time = round(float(research_time0) / 3600)
if len(str(research_time)) == 1:
time0 = '0' + str(research_time)
else:
time0 = str(research_time)
return time0
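# e.g. era5_time(49942) -> '14' (49942 seconds UTC rounds to the 14:00 full hour)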
def ceil_to_5(x):
"""Return the closest number in multiple of 5 in the larger direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
if x % 5 == 0:
return x
return x + (5 - x % 5)
def floor_to_5(x):
"""Return the closest number in multiple of 5 in the lesser direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
return x - x % 5
def floor_to_1(x):
"""Return the closest number in multiple of 5 in the lesser direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
return x - x % 1
def ceil_to_1(x):
"""Return the closest number in multiple of 5 in the larger direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
if x % 1 == 0:
return x
return x + (1 - x % 1)
def floor_to_2(x):
"""Return the closest number in multiple of 5 in the lesser direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
return x - x % 2
def ceil_to_2(x):
"""Return the closest number in multiple of 5 in the larger direction"""
assert isinstance(x, (int, np.int16, np.int32, np.int64)), 'input number is not int: {}'.format(type(x))
if x % 2 == 0:
return x
return x + (2 - x % 2)
def get_snwe(wsen, min_buffer=0.5, multi_1=True):
# get bounding box
lon0, lat0, lon1, lat1 = wsen
# lat/lon0/1 --> SNWE
S = np.floor(min(lat0, lat1) - min_buffer).astype(int)
N = np.ceil( max(lat0, lat1) + min_buffer).astype(int)
W = np.floor(min(lon0, lon1) - min_buffer).astype(int)
E = np.ceil( max(lon0, lon1) + min_buffer).astype(int)
# SNWE in multiple of 5
if multi_1:
S = floor_to_1(S)
W = floor_to_1(W)
N = ceil_to_1(N)
E = ceil_to_1(E)
return (S, N, W, E)
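# e.g. get_snwe((122, 33, 124, 35)) -> (32, 36, 121, 125) with the default 0.5-degree buffer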
def get_fname_list(date_list,area,hr):
flist = []
for k0 in date_list:
f0 = 'ERA-5{}_{}_{}.grb'.format(area, k0, hr)
flist.append(f0)
return flist
def snwe2str(snwe):
"""Get area extent in string"""
if not snwe:
return None
area = ''
s, n, w, e = snwe
if s < 0:
area += '_S{}'.format(abs(s))
else:
area += '_N{}'.format(abs(s))
if n < 0:
area += '_S{}'.format(abs(n))
else:
area += '_N{}'.format(abs(n))
if w < 0:
area += '_W{}'.format(abs(w))
else:
area += '_E{}'.format(abs(w))
if e < 0:
area += '_W{}'.format(abs(e))
else:
area += '_E{}'.format(abs(e))
return area
def cmdLineParse():
    parser = argparse.ArgumentParser(description='Download ERA-5 reanalysis data for the research region and SAR acquisition time.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=INTRODUCTION+'\n'+EXAMPLE)
    parser.add_argument('--region',dest='region', help='research area in degree. w/e/s/n')
parser.add_argument('--region-file',dest='region_file', help='mintPy formatted h5 file, which contains corner infomration.')
# region and region-file should provide at least one
parser.add_argument('--time',dest='time', help='interested research UTC-time in seconds. e.g., 49942')
parser.add_argument('--time-file',dest='time_file', help='mintPy formatted h5 file, which contains CENTER_LINE_UTC')
# time and time-file should provide at least one
parser.add_argument('--date-list', dest='date_list', nargs='*',help='date list to extract.')
parser.add_argument('--date-txt', dest='date_txt',help='date list text to extract.')
parser.add_argument('--date-file', dest='date_file',help='mintPy formatted h5 file, which contains date infomration.')
# date-list, date-txt, date-file should provide at least one.
inps = parser.parse_args()
if (not (inps.region or inps.region_file)) or (not (inps.time or inps.time_file)) or (not (inps.date_list or inps.date_txt or inps.date_file)):
parser.print_usage()
sys.exit(os.path.basename(sys.argv[0])+': error: region, time, and date information should be provided.')
return inps
###################################################################################################
INTRODUCTION = '''
Download ERA-5 re-analysis-datasets from the latest ECMWF platform:
'https://cds.climate.copernicus.eu/api/v2'
-------------------------------------------------------------------------------
coverage temporal_resolution spatial_resolution latency analysis
Global Hourly 0.25 deg (~31 km) 3-month 4D-var
-------------------------------------------------------------------------------
'''
EXAMPLE = '''Examples:
download_era5.py --region 122/124/33/35 --time 49942 --date-list 20180101 20180102
download_era5.py --region 122/124/33/35 --time 49942 --date-txt ~/date_list.txt
download_era5.py --region-file velocity.h5 --time-file velocity.h5 --date-txt ~/date_list.txt
download_era5.py --region-file timeseries.h5 --time-file velocity.h5 --date-file timeseries.h5
download_era5.py --region-file timeseries.h5 --time-file velocity.h5 --date-list 20180101 20180102
  download_era5.py --region 122/124/33/35 --time-file velocity.h5 --date-file timeseries.h5
  download_era5.py --region 122/124/33/35 --time 49942 --date-file timeseries.h5
'''
##################################################################################################
def main(argv):
inps = cmdLineParse()
root_dir = os.getcwd()
icams_dir = root_dir + '/icams'
era5_dir = icams_dir + '/ERA5'
raw_dir = era5_dir + '/raw'
sar_dir = era5_dir + '/sar'
if not os.path.isdir(icams_dir):
        print('icams folder is not found under the current directory.')
print('Generate folder: %s' % icams_dir)
os.mkdir(icams_dir)
if not os.path.isdir(era5_dir):
        print('ERA5 folder is not found under the current directory.')
print('Generate folder: %s' % era5_dir)
os.mkdir(era5_dir)
if not os.path.isdir(raw_dir):
print('Generate raw-data folder: %s' % raw_dir)
os.mkdir(raw_dir)
# Get research region (w, s, e, n)
if inps.region:
w,s,e,n = read_region(inps.region)
elif inps.region_file:
meta_region = ut.read_attr(inps.region_file)
w,s,e,n = get_meta_corner(meta_region)
wsen = (w,s,e,n)
snwe = get_snwe(wsen, min_buffer=0.5, multi_1=True)
area = snwe2str(snwe)
# Get research time (utc in seconds)
if inps.time:
research_time = inps.time
elif inps.time_file:
meta_time = ut.read_attr(inps.time_file)
research_time = meta_time['CENTER_LINE_UTC']
hour = era5_time(research_time)
# Get date list
date_list = []
if inps.date_list: date_list = inps.date_list
if inps.date_txt:
date_list2 = read_txt_line(inps.date_txt)
for list0 in date_list2:
if (list0 not in date_list) and is_number(list0):
date_list.append(list0)
if inps.date_file:
date_list3 = ut.read_hdf5(inps.date_file, datasetName='date')[0]
date_list3 = date_list3.astype('U13')
for list0 in date_list3:
if (list0 not in date_list) and is_number(list0):
date_list.append(list0)
flist = get_fname_list(date_list,area,hour)
date_list = list(map(int, date_list))
date_list = sorted(date_list)
date_list = list(map(str, date_list))
flist_era5_exist = [os.path.basename(x) for x in glob.glob(raw_dir + '/ERA-5*')]
flist_download = []
date_list_download = []
for f0 in flist:
if not f0 in flist_era5_exist:
f00 = os.path.join(raw_dir, f0) # add basedir
flist_download.append(f00)
date_list_download.append(f0.split('_')[5])
print('')
print('Total number of ERA5-data need to be downloaded: %s' % str(len(flist)))
print('Existed number of ERA5-data : %s' % str(len(flist)-len(flist_download)))
print('Number of the to be downloaded ERA5-data : %s' % str(len(flist_download)))
print('')
ut.ECMWFdload(date_list_download, hour, era5_dir, model='ERA5', snwe=snwe, flist = flist_download)
#s,n,w,e = snwe
#if not os.path.isfile('geometry_era5.h5'):
# print('')
# print('Start to generate the geometry file over the EAR5-data region...')
# AREA = '" ' + str(w - 0.1) + '/' + str(e + 0.1) + '/' + str(s - 0.1) + '/' + str(n + 0.1) + ' "'
# #print(AREA)
# call_str = 'generate_geometry_era5.py --region ' + AREA + ' --resolution 10000 -o geometry_era5.h5'
# os.system(call_str)
#else:
# print('geometry_ear5.h5 exist.')
# print('skip the step of generating the geometry file.')
print('Done.')
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
| UTF-8 | Python | false | false | 12,478 | py | 11 | download_era5.py | 9 | 0.54536 | 0.515547 | 0 | 367 | 32.99455 | 147 |
MICROBAO/Statistical-Prgramming | 15,822,659,551,820 | b9892a20fab9f0afe816ec66792df75882e7fe18 | da3994d65dc846b8ef661f1654e6f667a4dbe8d1 | /3.PrincipalComponentAnalysis(PCA)/PCA.py | 9c22b7475d66306b69733ec33384a14913a71e59 | [] | no_license | https://github.com/MICROBAO/Statistical-Prgramming | 6646e689bf2cdcacd20d487459f7bd66c2363020 | 87c65bb9c2ebb64ed1009a8ab22911f3289b3745 | refs/heads/master | 2021-01-10T16:58:09.788652 | 2016-02-18T04:35:50 | 2016-02-18T04:35:50 | 51,475,788 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Dongzhe Li 104434089
# coding: utf-8
#Principal component analysis. The input is a data set in the form of an nxp matrix X.
#The output includes the principal components Q and the corresponding variances Lambda.
def my_pca(A):
T = 1000
A_copy = A.copy()
r, c = A_copy.shape
V = np.random.random_sample((r, r))
for i in range(T):
n, m = V.shape
R = V.copy()
Q = np.eye(n)
for k in range(m-1):
x = np.zeros((n, 1))
x[k:, 0] = R[k:, k]
v = x
v[k] = x[k] + np.sign(x[k,0]) * np.linalg.norm(x)
s = np.linalg.norm(v)
if s != 0:
u = v / s
R -= 2 * np.dot(u, np.dot(u.T, R))
Q -= 2 * np.dot(u, np.dot(u.T, Q))
Q = Q.T
V = np.dot(A_copy, Q)
#Q, R = qr(V)
mv, nv = V.shape
R = V.copy()
Q = np.eye(nv)
for k in range(mv-1):
x = np.zeros((nv, 1))
x[k:, 0] = R[k:, k]
v = x
v[k] = x[k] + np.sign(x[k,0]) * np.linalg.norm(x)
s = np.linalg.norm(v)
if s != 0:
u = v / s
R -= 2 * np.dot(u, np.dot(u.T, R))
Q -= 2 * np.dot(u, np.dot(u.T, Q))
#Q = Q.T
return R.diagonal(), Q.T
from sklearn import datasets
from sklearn.decomposition import PCA
iris = datasets.load_iris()
X = iris.data[:, :5]
X_reduced = PCA(n_components=4)
X_reduced.fit_transform(X)
X_reduced.explained_variance_
#this is the pca from package sklearn which is based on SVD, the result is similar to our pca function
X = iris.data[:, :5]
A = np.cov(X.T)
D, V = my_pca(A)
print D.round(6)
print V.round(6)
# Compare the result with the numpy calculation
eigen_value_gt, eigen_vector_gt = np.linalg.eig(A)
print eigen_value_gt.round(6)
print eigen_vector_gt.round(6)
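# Added sketch (not part of the original script): fraction of total variance captured by each
# principal component, computed from the eigenvalues D returned by my_pca above.
print((D / D.sum()).round(6))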
| UTF-8 | Python | false | false | 1,846 | py | 28 | PCA.py | 21 | 0.51896 | 0.5 | 0 | 74 | 23.864865 | 103 |
rolandn/login_recaptcha | 3,959,959,875,380 | c0d79bd8ca501378b7fda296153ecaa25f815364 | 5eb5cc0f0b3decd46406853986eb03beb77dd55b | /__manifest__.py | ec755c0047dce2c4d7fbe88917cd16817da48f4e | [] | no_license | https://github.com/rolandn/login_recaptcha | 99d05fb06821338e1db59b09608046ddf75360ab | 9e232d7688a34b93610ff5f8461a5f6ff14e2b5e | refs/heads/master | 2020-07-15T12:07:09.571878 | 2019-08-31T14:54:00 | 2019-08-31T14:54:00 | 205,557,825 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to odoo
#
# Copyright (C) 2016 - Turkesh Patel. <http://www.almightycs.com>
#
# @author Turkesh Patel <info@almightycs.com>
###########################################################################
{
'name': "Login reCAPTCHA",
'category': "web",
'version': "1.0.0",
'summary': """Make odoo login more secure with google reCAPTCHA""",
'description': """Make odoo login more secure with google reCAPTCHA. This module integrates google recaptcha to your odoo login and helps prevent bot attacks
Secure odoo
make your odoo more secure
secure login
reCAPTCHA
login recaptcha
login security
avoid bot attack
security
secure acccount
account security
""",
'author': 'Almighty Consulting Services',
"website": 'http://www.almightycs.com',
'depends': ['website','base_setup'],
'data': [
'view/template.xml',
'view/res_config_view.xml',
],
'images': [
'static/description/odoo-login-recaptcha-turkeshpatel-almightycs',
],
'installable': True,
'auto_install': False,
'application': False,
"price": 20,
"currency": "EUR",
}
| UTF-8 | Python | false | false | 1,278 | py | 6 | __manifest__.py | 3 | 0.552426 | 0.544601 | 0 | 42 | 29.428571 | 161 |
prakharrr/15619-Cloud-Computing | 19,104,014,556,906 | 086e931e58dd67c100e38ee66f307f0141c23cea | da7589f6b4e689495a7ccf4673aff2114feab87e | /Project2/Project2.4 Auto Scaling/project2_4.py | 21de20c170cdc8ab54214f1270f72d5b227032ba | [] | no_license | https://github.com/prakharrr/15619-Cloud-Computing | 74059170abf366b8e5a23483f1f49a612d546d65 | a3bb1042607b5e8be3d04f8ffad5cb7366895806 | refs/heads/master | 2020-11-25T04:21:08.477445 | 2014-05-08T20:13:11 | 2014-05-08T20:13:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
import datetime
import boto
import boto.ec2.connection
import boto.route53.connection
import boto.route53.hostedzone
import boto.route53.record
import boto.route53.exception
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScalingGroup
from boto.ec2.autoscale import LaunchConfiguration
from boto.ec2.autoscale import ScalingPolicy
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
from time import sleep
import os
from boto.ec2.connection import EC2Connection
import boto.ec2.elb
from boto.ec2.elb import HealthCheck
AWS_ACCESS_KEY = 'AKIAIHXVS6TDDIYJG75Q'
AWS_SECRET_KEY = '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'
CONNECT_REGION = 'us-east-1'
CONNECT_AVAILABILITY_ZONE = 'us-east-1b'
AMI = 'ami-99e2d4f0'
KEY_NAME = 'Project2_1'
INSTANCE_TYPE = 'm1.small'
script_one='./apache_bench.sh sample.jpg 100000 100 '
# script_one='./apache_bench.sh sample.jpg 100 10 '
script_two=' logfile > '
file_name = 'out'
MIN_SIZE = 2
MAX_SIZE = 5
ARN = 'arn:aws:sns:us-east-1:782142299950:demo'
ASG_NAME = 'my_asg_group13'
LB_NAME = 'mylb13'
CONFIG_NAME = '15619-launch_confi13'
boto.config.add_section('Credentials')
boto.config.set('Credentials', 'aws_access_key_id', AWS_ACCESS_KEY)
boto.config.set('Credentials', 'aws_secret_access_key', AWS_SECRET_KEY)
##################### ec2 instance
instance_ids = []
instance_cnt = 0
throughput = 0
ec2_conn = boto.ec2.connect_to_region(CONNECT_REGION,aws_access_key_id=AWS_ACCESS_KEY,aws_secret_access_key=AWS_SECRET_KEY)
security_groups=ec2_conn.get_all_security_groups()
print security_groups
security_group=security_groups[2]
print security_group
##################### elb_conn
boto.ec2
regions = boto.ec2.elb.regions()
print regions
elb_conn = boto.ec2.elb.connect_to_region(CONNECT_REGION,aws_access_key_id=AWS_ACCESS_KEY,aws_secret_access_key=AWS_SECRET_KEY)
# ec2_conn = boto.ec2.connect_to_region("us-east-1")
list = elb_conn.get_all_load_balancers()
print list
hc = HealthCheck(
interval=240,
target='HTTP:8080/upload'
)
zones = [CONNECT_AVAILABILITY_ZONE]
ports = [(80, 80, 'http'), (8080, 8080, 'http')]
lb = elb_conn.create_load_balancer(LB_NAME, zones, ports)
##################### autoscale group
asg_conn = boto.ec2.autoscale.connect_to_region(CONNECT_REGION,aws_access_key_id=AWS_ACCESS_KEY,aws_secret_access_key=AWS_SECRET_KEY)
launch_config = LaunchConfiguration(name=CONFIG_NAME, image_id=AMI, key_name=KEY_NAME, security_groups=[security_group], instance_type=INSTANCE_TYPE, instance_monitoring=True)
asg_conn.create_launch_configuration(launch_config)
asg = AutoScalingGroup(group_name=ASG_NAME, load_balancers=[LB_NAME],
availability_zones=[CONNECT_AVAILABILITY_ZONE],
launch_config=launch_config, min_size=MIN_SIZE, max_size=MAX_SIZE, connection=asg_conn)
asg_conn.create_auto_scaling_group(asg)
scale_up_policy = ScalingPolicy(
name='scale_up', adjustment_type='ChangeInCapacity',
as_name=ASG_NAME, scaling_adjustment=1, cooldown=300)
scale_down_policy = ScalingPolicy(
name='scale_down', adjustment_type='ChangeInCapacity',
as_name=ASG_NAME, scaling_adjustment=-1, cooldown=300)
asg_conn.create_scaling_policy(scale_up_policy)
asg_conn.create_scaling_policy(scale_down_policy)
##################### cloud watch
cw_conn = boto.ec2.cloudwatch.connect_to_region(CONNECT_REGION,aws_access_key_id=AWS_ACCESS_KEY,aws_secret_access_key=AWS_SECRET_KEY)
alarm_dimensions = {"AutoScalingGroupName": ASG_NAME}
scale_up_policy = asg_conn.get_all_policies(
as_group=ASG_NAME, policy_names=['scale_up'])[0]
scale_down_policy = asg_conn.get_all_policies(
as_group=ASG_NAME, policy_names=['scale_down'])[0]
scale_up_alarm = MetricAlarm(
name='scale_up_on_cpu', namespace='AWS/EC2',
metric='CPUUtilization', statistic='Average',
comparison='>', threshold='80',
period='300', evaluation_periods=1,
alarm_actions=[scale_up_policy.policy_arn,ARN],
dimensions=alarm_dimensions)
scale_down_alarm = MetricAlarm(
name='scale_down_on_cpu', namespace='AWS/EC2',
metric='CPUUtilization', statistic='Average',
comparison='<', threshold='20',
period='300', evaluation_periods=1,
alarm_actions=[scale_down_policy.policy_arn,ARN],
dimensions=alarm_dimensions)
cw_conn.create_alarm(scale_up_alarm)
cw_conn.create_alarm(scale_down_alarm)
sleep(120)
lb.configure_health_check(hc)
print lb.dns_name
print "done"
# lb.delete()
# print 'total instances:', instance_cnt
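# Teardown sketch (added, illustrative; left commented out): remove the demo resources created
# above once finished. Names refer to the constants defined at the top of this file.
# cw_conn.delete_alarms(['scale_up_on_cpu', 'scale_down_on_cpu'])
# asg_conn.delete_auto_scaling_group(ASG_NAME, force_delete=True)
# asg_conn.delete_launch_configuration(CONFIG_NAME)
# elb_conn.delete_load_balancer(LB_NAME)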
| UTF-8 | Python | false | false | 4,667 | py | 23 | project2_4.py | 20 | 0.707307 | 0.677309 | 0 | 157 | 28.726115 | 175 |
sencheng/memories-improve-sensory-representations | 10,737,418,265,959 | 49e6ce1b86c0f791813553e2f02c07d270def3c4 | 143bdceaabba3fd1809469752a877419a2d4a372 | /old3/ccn/replay_prediction.py | ef6c9fc87c15e31a81fec5c43f6084234cf28a10 | [] | no_license | https://github.com/sencheng/memories-improve-sensory-representations | e6786428d45ab493d04c15917b40cb890b6c1c7f | 2faa96c4c298274fb0b10aa9f4443cc5eae38e8d | refs/heads/master | 2020-11-24T23:14:45.767068 | 2019-12-16T13:46:56 | 2019-12-16T13:46:56 | 228,381,621 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
(Figure 7)
Processes and plots data that was generated using :py:mod:`learnrate2`. Delta values of SFA features
and the prediction quality of the regressors are plotted against number of training repetitions of SFA2.
Also, scatter plots latent variable vs. regressor prediction are shown.
"""
from core import semantic, system_params, input_params, tools, streamlined, sensory, result
import numpy as np
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib
import sklearn.linear_model
import scipy.stats
if __name__ == "__main__":
PLOT_XY = True
PLOT_PHI = True
TEST_DATA = 1 # 0: Training only, 1: New only, 2: Both
LEARNRATE2 = True
EXP_MODE = 1 # 0: both, 1: linear, 2: square
MIX = False
PATH_PRE = "/local/results/" # Prefix for where to load results from
# PATH_PRE = "/media/goerlrwh/Extreme Festplatte/results/"
PATH = PATH_PRE + "replay_o18/" # Directory to load results from
# colors = [['b', 'c'], ['r', 'm'], ['g', 'y'], ['k', '0.5']]
colors = ['b', 'r', 'g', 'k']
linest = ['-', '--', ':']
dcolors = ['k', 'k', 'k', 'k']
dstyles = ['--', '-', '-.', ':']
ccolors = ['g', 'g', 'b', 'b']
cstyles = ['-', '--', '-', '--']
rcolor = '0.6'
rstyle = '--'
DCNT = 3
SINGLED = False
NFEAT = 3
# Whitening settings
# of the data
WHITENER = True
NORM = False
# for analysis
CORRNORM = False
DELTANORM = True
# matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['lines.markersize'] = 22
matplotlib.rcParams['lines.markeredgewidth'] = 2
matplotlib.rcParams['lines.linewidth'] = 2
font = {'family' : 'Sans',
'size' : 22}
matplotlib.rc('font', **font)
fig=plt.figure(figsize=(10,5))
NRUNS = 40
eps_list = [0.0005]
NSEQS = TEST_DATA if TEST_DATA > 0 else 1
NEPS = len(eps_list)
PARAMETERS = system_params.SysParamSet()
if os.path.isfile(PATH + "st1.p"):
with open(PATH + "st1.p", 'rb') as f:
PARAMETERS.st1 = pickle.load(f)
else:
PARAMETERS.st1.update(dict(number_of_snippets=100, snippet_length=100, movement_type='gaussian_walk', movement_params=dict(dx=0.05, dt=0.05, step=5),
object_code=input_params.make_object_code('TL'), sequence=[0,1], input_noise=0.1))
if WHITENER:
with open(PATH + "whitener.p", 'rb') as f:
whitener = pickle.load(f)
PARAMETERS.st1['number_of_snippets'] = 50
sample_parms = dict(PARAMETERS.st1)
sample_parms['number_of_snippets'] = 1
sample_parms['movement_type'] = 'sample'
sample_parms['movement_params'] = dict()
sensory_system = pickle.load(open(PATH + "sensory.p", 'rb'))
ran = np.arange(PARAMETERS.st1['number_of_snippets'])
sensys2 = sensory.SensorySystem(PARAMETERS.input_params_default, save_input=False)
parmdict = dict(PARAMETERS.st1)
parmdict['movement_type'] = 'sample'
parmdict['movement_params'] = dict(x_range=None, y_range=None, t_range=[0], x_step=0.05, y_step=0.05, t_step=22.5)
parmdict['number_of_snippets'] = 1
parmdict['snippet_length'] = None
training_sequence, training_categories, training_latent = sensory_system.generate(fetch_indices=False, **PARAMETERS.st1)
new_sequence1, new_categories, new_latent = sensory_system.generate(fetch_indices=False, **PARAMETERS.st1)
tcat = np.array(training_categories)
tlat = np.array(training_latent)
ncat = np.array(new_categories)
nlat = np.array(new_latent)
input_delta = np.mean(tools.delta_diff(new_sequence1))
sfa1 = semantic.load_SFA(PATH + "sfa1.p")
training_sequence = semantic.exec_SFA(sfa1,training_sequence)
if NORM:
training_sequence = streamlined.normalizer(training_sequence, PARAMETERS.normalization)(training_sequence)
elif WHITENER:
training_sequence = whitener(training_sequence)
target_matrix = np.append(tlat, tcat[:, None], axis=1)
new_sequence1 = semantic.exec_SFA(sfa1,new_sequence1)
if NORM:
new_sequence1 = streamlined.normalizer(new_sequence1, PARAMETERS.normalization)(new_sequence1)
elif WHITENER:
new_sequence1 = whitener(new_sequence1)
if DELTANORM:
intermediate_delta1 = np.mean(np.sort(tools.delta_diff(streamlined.normalizer(new_sequence1, PARAMETERS.normalization)(new_sequence1)))[:DCNT])
else:
intermediate_delta1 = np.mean(np.sort(tools.delta_diff(new_sequence1))[:DCNT])
learnerLo = sklearn.linear_model.LinearRegression()
learnerLo.fit(training_sequence[:,:], target_matrix)
predictionLo = learnerLo.predict(new_sequence1[:, :])
_, _, r_XLo, _, _ = scipy.stats.linregress(nlat[:, 0], predictionLo[:, 0])
_, _, r_YLo, _, _ = scipy.stats.linregress(nlat[:, 1], predictionLo[:, 1])
_, _, r_CLo, _, _ = scipy.stats.linregress(ncat, predictionLo[:, 4])
print(r_XLo, r_YLo, r_CLo)
b1_sfa = semantic.load_SFA(PATH + "b1.sfa")
b1_y_n = semantic.exec_SFA(b1_sfa, new_sequence1)
b1_w_n = streamlined.normalizer(b1_y_n, PARAMETERS.normalization)(b1_y_n)
if DELTANORM:
b1_ds_n = np.sort(tools.delta_diff(b1_w_n))[:DCNT]
else:
b1_ds_n = np.sort(tools.delta_diff(b1_y_n))[:DCNT]
b1_d_n = np.mean(b1_ds_n)
all_d_n = tools.delta_diff(np.concatenate((new_latent, new_categories[:,None]), axis=1))
x_d_n = all_d_n[0]
y_d_n = all_d_n[1]
cat_d_n = all_d_n[4]
lat_d_n = np.mean([x_d_n, y_d_n, cat_d_n])
b1_corr_n = np.abs(tools.feature_latent_correlation(b1_y_n, new_latent, new_categories))
b1_xycorr_n = np.mean([np.max(b1_corr_n[0,:]),np.max(b1_corr_n[1,:])])
training_matrix = semantic.exec_SFA(b1_sfa, training_sequence)
if CORRNORM:
training_matrix = streamlined.normalizer(training_matrix, PARAMETERS.normalization)(training_matrix)
learner = sklearn.linear_model.LinearRegression()
learner.fit(training_matrix[:,:NFEAT], target_matrix)
if CORRNORM:
prediction = learner.predict(b1_w_n[:, :NFEAT])
else:
prediction = learner.predict(b1_y_n[:,:NFEAT])
# prediction = learner.predict(b1_w_n)
_, _, r_valueX, _, _ = scipy.stats.linregress(nlat[:, 0], prediction[:, 0])
_, _, r_valueY, _, _ = scipy.stats.linregress(nlat[:, 1], prediction[:, 1])
_, _, r_valueC, _, _ = scipy.stats.linregress(ncat, prediction[:, 4])
b1_xyr = np.mean((r_valueX, r_valueY))
# b1_catcorr_n = np.max(b1_corr_n[4, :])
b1_catcorr_n = r_valueC
x = list(range(NRUNS))
# fd, ad = plt.subplots(1, NEPS*2, sharex=True, sharey=False)
# fd.text(0.5, 0.04, 'number of forming repetitions', ha='center', va='center')
# axy[0].set_ylabel('r of variable prediction')
# axy[0].set_title('correlation of\nfeatures with latents')
corlst1 = []
corlst2 = []
corlst1_2 = []
corlst2_2 = []
for ei, eps in enumerate(eps_list):
inc1_d = []
inc2_d = []
inc1_mean = []
inc1_var = []
inc1_xycorr, inc1_catcorr, inc1_xyr = [], [], []
for i in range(NRUNS):
inc1_sfa = inc2_sfa = semantic.load_SFA(PATH + "inc1_eps{}_{}.sfa".format(ei+1, i))
inc1_y = semantic.exec_SFA(inc1_sfa, new_sequence1)
inc1_w = streamlined.normalizer(inc1_y, PARAMETERS.normalization)(inc1_y)
if DELTANORM:
inc1_ds = np.sort(tools.delta_diff(inc1_w))[:DCNT]
else:
inc1_ds = np.sort(tools.delta_diff(inc1_y))[:DCNT]
inc1_mean.append(np.mean(inc1_y))
inc1_var.append(np.var(inc1_y))
inc1_d.append(np.mean(inc1_ds))
inc1_corr_raw = tools.feature_latent_correlation(inc1_y, new_latent, new_categories)
inc1_corr = np.abs(inc1_corr_raw)
if ei == 1:
corlst1.append(inc1_corr_raw)
if ei == 2:
corlst1_2.append(inc1_corr_raw)
inc1_xycorr.append(np.mean([np.max(inc1_corr[0, :]), np.max(inc1_corr[1, :])]))
training_matrix = semantic.exec_SFA(inc1_sfa, training_sequence)
if CORRNORM:
training_matrix = streamlined.normalizer(training_matrix, PARAMETERS.normalization)(training_matrix)
learner = sklearn.linear_model.LinearRegression()
learner.fit(training_matrix[:,:NFEAT], target_matrix)
if CORRNORM:
prediction = learner.predict(inc1_w[:, :NFEAT])
else:
prediction = learner.predict(inc1_y[:,:NFEAT])
# prediction = learner.predict(inc1_w)
_, _, r_valueX, _, _ = scipy.stats.linregress(nlat[:, 0], prediction[:, 0])
_, _, r_valueY, _, _ = scipy.stats.linregress(nlat[:, 1], prediction[:, 1])
_, _, r_valueC, _, _ = scipy.stats.linregress(ncat, prediction[:, 4])
inc1_xyr.append(np.mean((r_valueX, r_valueY)))
inc1_catcorr.append(r_valueC)
# inc1_catcorr.append(np.max(inc1_corr[4, :]))
# axxy = axy[ei]
# line_br, = fig.plot(x, [b1_xyr] * NRUNS, label="batch r", c=ccolors[0], ls=cstyles[0])
# line_bcat, = fig.plot(x, [b1_catcorr_n] * NRUNS, label="batch cat", c=ccolors[1], ls=cstyles[1])
line_incr, = plt.plot(x, inc1_xyr, label="inc r", c=ccolors[2], ls=cstyles[2])
line_inccat, = plt.plot(x, inc1_catcorr, label="inc cat", c=ccolors[3], ls=cstyles[3])
if eps == 0.0005:
plt.plot(x[0], inc1_xyr[0], marker='^', c='k', mfc='none', clip_on=False)
plt.plot(x[-1], inc1_xyr[-1], marker='v', c='k', mfc='none', clip_on=False)
# axxy.plot(x[0], inc1_catcorr[0], marker='^', c='k', mfc='none', clip_on=False)
# axxy.plot(x[-1], inc1_catcorr[-1], marker='v', c='k', mfc='none', clip_on=False)
plt.legend((line_incr, line_inccat), ("x,y", "identity"), loc=4)
plt.ylabel("encoding quality")
plt.xlabel("training repetitions")
plt.subplots_adjust(left=0.1, right=0.9, bottom=0.2)
# ad[0].legend((line_bd, line_incd, line_pred, line_latd, line_incmean, line_incvar), ("batch", "incr.", "SFAlo", "latents", "incmean", "incvar/100"), loc=5)
# ad[0].legend((line_bd, line_incd, line_pred, line_latd), ("batch", "incr.", "SFAlo", "latents"), loc=5)
plt.show()
| UTF-8 | Python | false | false | 10,304 | py | 165 | replay_prediction.py | 142 | 0.605784 | 0.580551 | 0 | 253 | 39.727273 | 161 |
dingdongyouarewrong/FaceRecognition | 5,849,745,488,729 | 8512a77909a5569b8e2614e6bc2b52d0433a8b1c | a87c1fcb7d21717ab3ace3f7c190b67e6d3958e8 | /findFace.py | 70e7b8de0cd103f68c491b62e0aef079bc85de6f | [] | no_license | https://github.com/dingdongyouarewrong/FaceRecognition | 3dc6af5465161b9a17849c4200b58a00cac1f79a | 498bd5c0fadde345aea08249f8e8c7b2602be3e5 | refs/heads/master | 2020-07-26T01:41:18.009134 | 2019-09-14T19:20:15 | 2019-09-14T19:20:15 | 208,490,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2, numpy
from keras.preprocessing.image import img_to_array
from model import create_model, findCosineDistance
haar_file = 'cascade/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(haar_file)
vector = 0
webcam = cv2.VideoCapture(0)
count_of_faces = 1
model = create_model()
img1_descriptor = numpy.load("descriptors/img1_descriptor.npy")
img2_descriptor = numpy.load("descriptors/img2_descriptor.npy")
def get_name_from_base(descriptors):
    # a cosine distance below 0.3 to a stored descriptor is treated as a match
    if findCosineDistance(descriptors, img1_descriptor) < 0.3:
        return "1"
    elif findCosineDistance(descriptors, img2_descriptor) < 0.3:
        return "2"
    # fall back to a string so cv2.putText never receives None
    return "unknown"
def mat_preprocessing(detected_face):
    # convert to an array, add the batch dimension and scale pixels from [0, 255] to [-1, 1]
    image_pixels = img_to_array(detected_face)
    image_pixels = numpy.expand_dims(image_pixels, axis=0)
    image_pixels /= 127.5
    image_pixels -= 1
    return image_pixels
while count_of_faces<30:
(_, image) = webcam.read()
faces = face_cascade.detectMultiScale(image,1.3,5)
for (x,y,width,height) in faces:
if width>130:
detected_face = image[int(y):int(y+height), int(x):int(x+width)]
detected_face = cv2.resize(detected_face,(224,224))
cv2.rectangle(image, (x, y), (x + width, y + height), (0, 255, 0), 3)
image_pixels = mat_preprocessing(detected_face)
captured_representation = model.predict(image_pixels)[0,:]
name = get_name_from_base(captured_representation)
cv2.putText(image, name, (x, y-10), cv2.QT_FONT_NORMAL, 0.7,(0,255,0),2)
cv2.imshow('FaceRecognition',image)
if cv2.waitKey(1) & 0xFF == ord('q'): #press q to quit
break
webcam.release()
| UTF-8 | Python | false | false | 1,670 | py | 4 | findFace.py | 3 | 0.668263 | 0.630539 | 0 | 52 | 31.115385 | 84 |
Kori3a/M-PT1-38-21 | 2,336,462,246,132 | cb51344bc68b1fa4dac9d124f966911f5e3b6230 | 89d7bd51638bb3e8ca588062af1a3ec4870efd55 | /Tasks/Abramyk Yulia/Project/BeautyShop/main/views.py | e7fab058a398e4cba57e6a4c395f5850fc7d7529 | [] | no_license | https://github.com/Kori3a/M-PT1-38-21 | 9aae9a0dba9c3d1e218ade99f7e969239f33fbd4 | 2a08cc4ca6166540dc282ffc6103fb7144b1a0cb | refs/heads/main | 2023-07-09T20:21:32.146456 | 2021-08-19T17:49:12 | 2021-08-19T17:49:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import Comment, Price_Woman, Price_Man, Price_Nail
from .forms import CommentForm, FormLeadForm
from .telegramm import send_message
def main(request):
error = ''
if request.method == "POST":
form = FormLeadForm(request.POST)
if form.is_valid():
name = form.cleaned_data["name"]
tel = form.cleaned_data["tel"]
form.save()
message = "*ЗАЯВКА С САЙТА*: Logo" + "\n" + "*ИМЯ*: " + str(name) + "\n" + "*ТЕЛЕФОН*: " + str (tel)
send_message(message)
error = "Nice! We will call you soon :)"
else:
error = "Something is wrong, try again please"
form = FormLeadForm()
data = {"form": form,
"error": error,}
return render(request, 'main/main_page.html', data)
def price(request):
price_w = Price_Woman.objects.all()
price_m = Price_Man.objects.all()
price_n = Price_Nail.objects.all()
return render(request, 'main/price.html', {'price_w': price_w, 'price_m': price_m, 'price_n': price_n,})
def about(request):
return render(request, 'main/about_us.html')
def comments(request):
error = ''
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
form.save()
else:
error = "Something is wrong, try again please"
form = CommentForm()
comment = Comment.objects.all().order_by('-id')
data = {"form": form,
"error": error,
"comment": comment}
return render(request, 'main/comments.html', data) | UTF-8 | Python | false | false | 1,633 | py | 325 | views.py | 259 | 0.586592 | 0.586592 | 0 | 61 | 25.42623 | 113 |
kinghaoYPGE/career | 6,347,961,702,895 | 4561997cef0ad5f382f8542fba41b3a29a5aba83 | ddb09330eb51f823c1dc66617b363affd4e23290 | /python/20190325/my_asyncs/my_redis/async_redis_client/example/app.py | 2a7d626203fd81e980bf541ac2904f691a8f3951 | [
"MIT"
] | permissive | https://github.com/kinghaoYPGE/career | 9df8a1d5303f2981ba0116fa60284945cec03bfd | 50260f6ccf874c7c5f7af23f02ed823645ea7898 | refs/heads/master | 2020-04-04T11:19:44.480173 | 2019-04-17T14:31:37 | 2019-04-17T14:31:37 | 155,886,969 | 3 | 4 | MIT | false | 2019-03-21T09:12:01 | 2018-11-02T15:39:31 | 2019-03-20T16:23:19 | 2019-03-21T09:12:00 | 13,607 | 2 | 2 | 2 | CSS | false | null | # -*- coding:utf-8 -*-
from asyncredis import Client
import tornado.httpserver
import tornado.web
import tornado.ioloop
import tornado.gen
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('app')
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
c = Client()
# 从 Redis 数据库中获取键值
async_redis = yield tornado.gen.Task(c.get, "async_redis")
self.set_header("Content-Type", "text/html")
self.render("template.html", title="Simple demo", async_redis=async_redis)
application = tornado.web.Application([
(r'/', MainHandler),
])
# set the value of the key async_redis in the Redis database
@tornado.gen.coroutine
def create_test_data():
c = Client()
yield c.select(0)
yield c.set("async_redis", "redis异步客户端")
if __name__ == '__main__':
create_test_data()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8089)
print("Demo is run at 0.0.0.0:8080")
tornado.ioloop.IOLoop.instance().start()
| UTF-8 | Python | false | false | 1,067 | py | 411 | app.py | 274 | 0.674441 | 0.660836 | 0 | 45 | 21.866667 | 82 |
yazdanimehdi/teapo-backend | 11,630,771,466,095 | d30ca4ec3fa2819207d5e7d6e5e1270491b1ffd9 | 8e29db4e47d3e33130a753172a33301df33f7cbd | /tpo/models/speaking.py | 2fe234e6e3f1996f0213c3d9065293467a3b6e42 | [] | no_license | https://github.com/yazdanimehdi/teapo-backend | 3486b6b9c241157ecbfa4feac0e23eb3c8eb1267 | 32561d25cfff54fab28f6424467e318b70b1657f | refs/heads/master | 2023-04-19T22:00:01.639790 | 2021-01-05T00:19:07 | 2021-01-05T00:19:07 | 365,662,263 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
def upload_location_speaking(instance, filename):
return f"tpo/speaking/{instance.number}/{filename}"
class Speaking(models.Model):
related = models.CharField(max_length=200, blank=True, null=True)
speaking_audio_file = models.FileField(blank=True, null=True, upload_to=upload_location_speaking)
speaking_reading_title = models.CharField(blank=True, null=True, max_length=400)
speaking_reading = models.TextField(blank=True, null=True)
speaking_image = models.FileField(blank=True, null=True, upload_to=upload_location_speaking)
speaking_question = models.TextField()
speaking_question_audio_file = models.FileField(blank=True, null=True, upload_to=upload_location_speaking)
speaking_question_guide_audio_file = models.FileField(blank=True, null=True, upload_to=upload_location_speaking)
speaking_question_before_read_audio = models.FileField(blank=True, null=True, upload_to=upload_location_speaking)
number = models.IntegerField()
speaking_audio_file_transcript = models.TextField(blank=True, null=True)
sections = models.IntegerField(blank=True, null=True)
institute = models.ForeignKey(to='institutions.Users', on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return f"{self.related} {self.number} {self.speaking_question}"
| UTF-8 | Python | false | false | 1,351 | py | 75 | speaking.py | 59 | 0.750555 | 0.746114 | 0 | 24 | 55.25 | 117 |
kongyew/gpdb | 7,335,804,143,551 | 23b127602cb0daa5e2cccca6caf63d3ba44d8d28 | e84670cda6bc7543e00c248f317e833bebf4bb74 | /src/test/tinc/tinctest/test/test_discovery.py | d812191d9c600d73c5dd80c4e4635dc949cfd883 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"LicenseRef-scancode-other-copyleft",
"metamail",
"BSD-3-Clause",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-permissive",
"ISC",
"Python-2.0",
"bzip2-1.0.6",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-mit-modification-obligations",
"BSD-4-Clause-UC",
"curl",
"LicenseRef-scancode-rsa-md4",
"Zlib",
"BSD-2-Clause",
"Beerware",
"NTP",
"OLDAP-2.8",
"W3C",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"LicenseRef-scancode-zeusbench",
"X11-distribute-modifications-variant",
"Spencer-94",
"HPND-sell-variant",
"MIT-CMU",
"W3C-19980720",
"RSA-MD",
"LicenseRef-scancode-stream-benchmark",
"OpenSSL",
"MIT"
] | permissive | https://github.com/kongyew/gpdb | d6f9fc2578e666df1e559a5cb538b91b48fd2615 | e3a8163ca4c12cbea0f17303bcf987e16e5bc3d6 | refs/heads/master | 2023-04-20T08:52:29.481397 | 2017-12-11T23:18:03 | 2017-12-16T01:25:22 | 99,146,458 | 0 | 0 | Apache-2.0 | true | 2023-08-20T18:03:44 | 2017-08-02T18:00:56 | 2017-08-02T18:01:43 | 2023-08-20T18:03:43 | 242,625 | 0 | 0 | 19 | C | false | false | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import os
import re
from contextlib import closing
from datetime import datetime
from StringIO import StringIO
from unittest2.runner import _WritelnDecorator
import tinctest
from tinctest.discovery import TINCDiscoveryQueryHandler, TINCDiscoveryException
import unittest2 as unittest
@unittest.skip('mock')
class MockTINCTestCase(tinctest.TINCTestCase):
def test_smoke_01(self):
"""
@tags smoke bug-1
"""
pass
def test_functional_02(self):
"""
@tags functional storage
"""
pass
class TINCDiscoveryQueryHandlerTests(unittest.TestCase):
def test_positive_queries(self):
test_queries = []
test_queries.append("class=test_smoke*")
test_queries.append("module=test_smoke*")
test_queries.append("method=test_smoke*")
test_queries.append("package=storage.uao.*")
test_queries.append("class=* and module=* or package=* and class=SmokeTests*")
test_queries.append("class=* AND module=* OR package=* AND class=SmokeTests*")
test_queries.append("class=* and module=* or package=* and class=SmokeTests*")
test_queries.append("""class != * and module != * or package != * and class != SmokeTests*
AND class = test""")
test_queries.append("class != \"test_smoke\"")
test_queries.append("class != 'test_smoke'")
test_queries.append("""class = 'test' AND module=\"test\"""")
test_queries.append("class = test* and method=test*")
test_queries.append("tags = tag1")
handler = TINCDiscoveryQueryHandler(test_queries)
self.assertEquals(len(handler.query_list), len(test_queries))
def test_negative_queries(self):
handler = TINCDiscoveryQueryHandler("class=test_smoke*")
test_queries = []
test_queries.append("classtest_smoke* and moduletestsmoke*")
# Partial match should error out
test_queries.append("class=test_smoke* remaining text")
# Unsupported operator
test_queries.append("class=test* XOR package=storage.*")
test_queries.append("class = test* AND module = test* OR packagetest*")
# Mixed cases not for operators supported
test_queries.append("claSS=test* oR Package=storage.*")
test_queries.append("class=test* and module=test* And package=storage")
# TBD: Following query currently passes , it should throw an exception instead
# test_queries.append("class = test*AND module = test*")
# Unclosed quotes in predicate should error out
test_queries.append("class='test*")
# Unmatched quotes in predicate should error out
test_queries.append("class='test*\"")
#hanging operators should error out
test_queries.append("class=test* and method=test* or")
for query in test_queries:
try:
handler = TINCDiscoveryQueryHandler(query)
except TINCDiscoveryException:
continue
self.fail("Query %s did not throw an exception" %query)
def test_apply_queries_with_single_query(self):
handler = TINCDiscoveryQueryHandler("method=test_smoke*")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
# metadata equals predicate
handler = TINCDiscoveryQueryHandler("author=bob")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
# metadata equals predicate with dash (typically used for bugs)
handler = TINCDiscoveryQueryHandler("tags=bug-1")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
#metadata not equals predicate
handler = TINCDiscoveryQueryHandler("tags != functional")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
# metadata non-match
# metadata equals predicate
handler = TINCDiscoveryQueryHandler("author=alice")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertFalse(handler.apply_queries(tinc_test_case))
#metadata not equals predicate
handler = TINCDiscoveryQueryHandler("tags != smoke")
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertFalse(handler.apply_queries(tinc_test_case))
# non existing metadata should return False
handler = TINCDiscoveryQueryHandler("non_existing_tags = smoke")
# not equals on a non existing metadata will currently return True
# We will have to decide whether or not to throw an exception for such tests
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertFalse(handler.apply_queries(tinc_test_case))
def test_apply_queries_with_multiple_queries(self):
queries = []
queries.append("method=test_smoke*")
queries.append("class=Mock*")
queries.append("class='MockTINC*'")
queries.append("module='test_discovery*'")
queries.append("package = tinc*.test*")
queries.append("method != test_smoke_02")
queries.append("class != NonMock*")
queries.append("package != storage.uao.*")
queries.append("module != test_regression*")
queries.append("author = bob")
queries.append("tags = smoke")
queries.append("author != alice")
queries.append("tags != functional")
handler = TINCDiscoveryQueryHandler(queries)
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
queries = []
queries.append("method=test_smoke*")
queries.append("class=Mock*")
queries.append("class='MockTINC*'")
queries.append("module='test_discovery*'")
queries.append("package = storage.uao.*")
handler = TINCDiscoveryQueryHandler(queries)
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertFalse(handler.apply_queries(tinc_test_case))
def test_apply_queries_with_multiple_predicates(self):
queries = []
queries.append("method=test_smoke* OR class = Mock* or class = MockTINC*")
queries.append("package=storage.uao.* OR method=test_smoke*")
queries.append("package=storage.uao.* and method=*testsmoke* and module!=test_discovery or method=test_smoke_01")
#queries.append("package != storage.uao or method=*testsmoke* and module != test_discovery and class != Mock* and tags = smoke and author != alice")
handler = TINCDiscoveryQueryHandler(queries)
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertTrue(handler.apply_queries(tinc_test_case))
queries = []
queries.append("method=test_smoke* OR class = Mock* or class = MockTINC*")
queries.append("package=storage.uao.* OR method=test_smoke*")
queries.append("package=storage.uao.* and method=*testsmoke* and module!=test_discovery or method=test_smoke_01")
queries.append("package != storage.uao or method=*testsmoke* and module != test_discovery and class != Mock*")
# Introduce false at the end
queries.append("package != storage and method != test_smoke_01")
handler = TINCDiscoveryQueryHandler(queries)
tinc_test_case = MockTINCTestCase('test_smoke_01')
self.assertFalse(handler.apply_queries(tinc_test_case))
class TINCDiscoveryWithQueriesTests(unittest.TestCase):
"""
For the following tests, we use the following mock test cases from
tinctest/test/discovery/mockstorage/uao/test_* and
tinctest/test/discovery/mockquery/cardinality/test_*
There are three mock test modules within each of the two test folders, containing 14 tests in total,
upon which the following querying tests will operate:
test_functional_*.py: *FunctionalTests* with two test methods of the form test_functional_*
test_smoke_*.py: *SmokeTests* with two test methods of the form test_smoke_*
test_uao.py / test_cardinality.py: Three test methods of the form test_* with tags 'smoke',
'functional', 'stress'
"""
def _discover_and_run_tests(self, start_dirs, patterns, top_level_dir, query_handler):
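# Helper: discover tests under start_dirs (optionally filtered through query_handler), run them into a buffered TINCTestResultSet and return the result.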
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = tinc_test_loader.discover(start_dirs = start_dirs,
patterns = patterns,
top_level_dir = None,
query_handler = query_handler
)
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
return tinc_test_result
def test_discovery_with_no_queries(self):
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = None)
# Should have run all tests from mockstorage and mockquery
self.assertEquals(tinc_test_result.testsRun, 14)
def test_discovery_with_single_predicate(self):
query_handler = TINCDiscoveryQueryHandler("class=*SmokeTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run four smoke tests - 2 each from test_smoke_*.py in mockstorage/uao and mockquery/cardinality
self.assertEquals(tinc_test_result.testsRun, 4)
# Predicate on module
query_handler = TINCDiscoveryQueryHandler("module=*_functional_*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run four functional tests - 2 each from *FunctionalTests in test_functional_*.py in mockstorage/uao and mockquery/cardinality
self.assertEquals(tinc_test_result.testsRun, 4)
# Predicate on package
query_handler = TINCDiscoveryQueryHandler("package=*query.*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run only tests from the package mockquery/*
self.assertEquals(tinc_test_result.testsRun, 7)
# predicate on test method
query_handler = TINCDiscoveryQueryHandler("method=*functional*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run only four functional tests (test_functional_*) from *FunctionalTests in the modules test_functional_*.py in
# mockquery/cardinality and mockstorage/uao
self.assertEquals(tinc_test_result.testsRun, 4)
# predicate on metadata
query_handler = TINCDiscoveryQueryHandler("tags = stress")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run the two test methods tagged 'stress' in test_uao.py and test_cardinality.py
self.assertEquals(tinc_test_result.testsRun, 2)
def test_single_predicate_not_equals(self):
query_handler = TINCDiscoveryQueryHandler("class != *SmokeTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run everything except the tests in test_smoke_*.py
self.assertEquals(tinc_test_result.testsRun, 10)
# Predicate on module
query_handler = TINCDiscoveryQueryHandler("module != *_functional_*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run everything except the tests in test_functional*.py in mockquery and mockstorage
self.assertEquals(tinc_test_result.testsRun, 10)
# Predicate on package
query_handler = TINCDiscoveryQueryHandler("package != *query.*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run all the tests from within mockstorage
self.assertEquals(tinc_test_result.testsRun, 7)
# predicate on test method
query_handler = TINCDiscoveryQueryHandler("method != *functional*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run all tests in all the modules except for the tests in test_functional_*.py of the form test_functional*
self.assertEquals(tinc_test_result.testsRun, 10)
# predicate on metadata
query_handler = TINCDiscoveryQueryHandler("tags != stress")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run all the tests except for the tests tagged 'stress' within test_uao.py and test_cardinality.py
self.assertEquals(tinc_test_result.testsRun, 12)
def test_multiple_predicates_within_a_query(self):
# Run all smoke tests
query_handler = TINCDiscoveryQueryHandler("tags = smoke or method = *smoke* or class = *SmokeTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run all four tests from within test_smoke_*.py and the one test tagged 'smoke' in each of test_uao.py and test_cardinality.py
self.assertEquals(tinc_test_result.testsRun, 6)
# Run all smoke tests that are not tagged
query_handler = TINCDiscoveryQueryHandler("method = *smoke* or class = *SmokeTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run four tests, two each from within test_smoke_*.py in mockstorage and mockquery
self.assertEquals(tinc_test_result.testsRun, 4)
# Run all functional tests
query_handler = TINCDiscoveryQueryHandler("tags = functional or method = *functional* or class = *FunctionalTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run all the functional tests , four from test_functional_*.py and one test each tagged 'functional'
# from within test_uao.py and test_cardinality.py
self.assertEquals(tinc_test_result.testsRun, 6)
# With AND predicates
query_handler = TINCDiscoveryQueryHandler("method = *functional* and class = *FunctionalTests*")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Should have run only four tests from within test_functional*.py in mockquery and mockstorage
self.assertEquals(tinc_test_result.testsRun, 4)
# Run all the tests except for stress tests
query_handler = TINCDiscoveryQueryHandler("module = test_* and tags != stress")
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
# Run all the tests from within mockquery and mockstorage except the tests tagged 'stress' within
# test_uao.py and test_cardinality.py
self.assertEquals(tinc_test_result.testsRun, 12)
def test_multiple_queries(self):
# Run all tests that are in class *SmokeTests* and whose method names start with test_smoke_*
queries = ['class = *SmokeTests*', 'method=test_smoke*']
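# A test must satisfy every query in the list to be selected (multiple queries are effectively ANDed).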
query_handler = TINCDiscoveryQueryHandler(queries)
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test*'],
top_level_dir = None, query_handler = query_handler)
self.assertEquals(tinc_test_result.testsRun, 4)
def test_queries_with_patterns(self):
# patterns should be applied first
queries = ['method = test_*']
query_handler = TINCDiscoveryQueryHandler(queries)
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery', 'mockquery')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir], patterns =['test_functional*'],
top_level_dir = None, query_handler = query_handler)
# Only run two tests within mockquery since the patterns should be applied first and the queries should be applied
# on the tests that match the pattern
self.assertEquals(tinc_test_result.testsRun, 2)
def test_queries_with_patterns_across_multiple_folders(self):
# patterns should be applied first
queries = ['method = test_*']
query_handler = TINCDiscoveryQueryHandler(queries)
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'discovery', 'mockquery')
test_dir2 = os.path.join(pwd, 'discovery', 'mockstorage')
tinc_test_result = self._discover_and_run_tests(start_dirs = [test_dir, test_dir2], patterns =['test_functional*'],
top_level_dir = None, query_handler = query_handler)
# Only run four tests, two each from mockquery and mockstorage, since the patterns should be applied first and the queries
# should be applied on the tests that match the pattern
self.assertEquals(tinc_test_result.testsRun, 4)
| UTF-8 | Python | false | false | 22,099 | py | 2,859 | test_discovery.py | 1,984 | 0.623693 | 0.620119 | 0 | 420 | 51.22619 | 156 |
aman-17/Machine-learning | 9,929,964,434,623 | 5a8219fe955b4440afbab45d902f7713d8f3f2a0 | d220343937f5195b343e6a9c22d4faf3d3b35cba | /mid/q10.py | 8c11036e5e8e2b698e493b22c259d1a5a7c9ab32 | [
"Apache-2.0"
] | permissive | https://github.com/aman-17/Machine-learning | cdada879b62ce6f1f86bf94ab3f2188503f5fa8c | da5e4d3e1de56d68a2fedab7c00c0db0f1400399 | refs/heads/master | 2023-05-14T02:29:00.036872 | 2023-04-30T17:14:12 | 2023-04-30T17:14:12 | 297,309,696 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy
import pandas
import sys
# Set some options for printing all the columns
numpy.set_printoptions(precision = 10, threshold = sys.maxsize)
numpy.set_printoptions(linewidth = numpy.inf)
pandas.set_option('display.max_columns', None)
pandas.set_option('display.expand_frame_repr', False)
pandas.set_option('max_colwidth', None)
pandas.options.display.float_format = '{:,.10}'.format
from scipy.stats import norm
from sklearn import metrics, naive_bayes
# Specify the roles
feature = ['height', 'weight']
target = 'bmi_status'
# Read the Excel file
input_data = pandas.read_excel('./mid/bmi_men.xlsx')
bmi_men = input_data[feature + [target]].dropna().reset_index(drop = True)
cmap = ['blue', 'green', 'orange', 'red']
slabel = ['Below', 'Normal', 'Over', 'Obese']
plt.figure(figsize = (8,6), dpi = 80)
for status in range(4):
plot_data = bmi_men[bmi_men['bmi_status'] == (status+1)]
plt.scatter(plot_data['weight'], plot_data['height'], c = cmap[status], label = slabel[status])
plt.xlabel('Weight in Kilograms')
plt.ylabel('Height in Meters')
plt.xticks(numpy.arange(50, 130, 10))
plt.yticks(numpy.arange(1.0, 2.8, 0.2))
plt.grid(axis = 'both', linestyle = '--')
plt.legend(title = 'BMI Level')
# plt.show()
xTrain = bmi_men[feature]
yTrain = bmi_men[target].astype('category')
_objNB = naive_bayes.GaussianNB()
thisFit = _objNB.fit(xTrain, yTrain)
# print('Probability of each target class')
# print(thisFit.class_prior_)
# print('Means of Features of each target class')
# print(thisFit.theta_)
# print('Variances of Features of each target class')
# print(thisFit.var_)
# print('Number of samples encountered for each class during fitting')
# print(thisFit.class_count_)
yTrain_predProb = _objNB.predict_proba(xTrain)
yTrain_predClass = _objNB.predict(xTrain)
confusion_matrix = metrics.confusion_matrix(yTrain, yTrain_predClass)
# Manually calculate the predicted probability
class_prob = bmi_men.groupby(target).size() / bmi_men.shape[0]
summary_height = bmi_men.groupby(target)['height'].describe()
summary_weight = bmi_men.groupby(target)['weight'].describe()
logpdf_height = norm.logpdf(1.85, loc = summary_height['mean'], scale = summary_height['std'])
logpdf_weight = norm.logpdf(96.0, loc = summary_weight['mean'], scale = summary_weight['std'])
logpdf = numpy.log(class_prob) + logpdf_weight + logpdf_height
my_prob = numpy.exp(logpdf)
sum_prob = numpy.sum(my_prob)
my_prob = numpy.divide(my_prob, sum_prob)
xTest = pandas.DataFrame({'height': [1.85], 'weight': [96.0]})
yTest_predProb = _objNB.predict_proba(xTest)
print(my_prob)
print(yTest_predProb)
| UTF-8 | Python | false | false | 2,720 | py | 50 | q10.py | 40 | 0.694853 | 0.681618 | 0 | 83 | 30.771084 | 99 |
chrissmith10/projects-freecodecamp | 8,254,927,183,928 | 40fe84bc8afd2ce6ba173b6a061125a1a457a6c7 | e9facc809057cb7a8eb9395c38ded8ef2feb8e2b | /portscanner1.py | 958482566e40e110bd75a77d0527a7d0e809c43b | [] | no_license | https://github.com/chrissmith10/projects-freecodecamp | 4e046241274e45cb4677ccea3265434dc56dafcf | 7d6c079e6b0b3371c69d85185a684b1d53a3c564 | refs/heads/main | 2023-02-17T23:58:44.826302 | 2021-01-01T18:21:31 | 2021-01-01T18:21:31 | 314,051,778 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Port Scanner
import socket
# https://docs.python.org/3/library/socket.html
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#host = "137.74.187.100" # you can get this number by entering "ping 'website URL'" in the CLI
#port = 443 # could check ports 443, 80, 21, etc
host = input("Please enter the IP address you want to scan: ")
port = int(input("Please enter the port you want to scan: "))
def portScanner(port):
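# connect_ex() returns 0 on success and an error code otherwise, so a truthy value means the connection failed (port closed or filtered).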
if s.connect_ex((host, port)):
print("The port is closed")
else:
print("The port is open")
portScanner(port)
| UTF-8 | Python | false | false | 582 | py | 7 | portscanner1.py | 7 | 0.654639 | 0.616838 | 0 | 20 | 27.1 | 94 |
woshichenya/hezi | 9,783,935,521,028 | 3a4e44727efd8f3783015078082523984da3a585 | 59636b143a2ab189145b17a7ea9a38de5af1f7a5 | /All/tianmao2.py | 0ca0b88838b183c731a6f93f223b33528d7e0ed8 | [] | no_license | https://github.com/woshichenya/hezi | 880a70c34cc61b6b6bcf1ccb65fa54989595fb71 | 4211ff8ef78f5d15d8fc8065247f916dfe9d305d | refs/heads/master | 2020-04-28T21:46:02.664025 | 2019-05-14T08:47:33 | 2019-05-14T08:47:33 | 175,593,966 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from beifen import baibaoxiang, femail
import threading
email= femail.email
import time
url="https://detail.tmall.com/item.htm?id=586750895922&ut_sk=1.WS2NQGDD3RQDABrAF+sKkx4n_21380790_1551403955700.PanelQRCode.1&sourceType=item&price=199&suid=184110C6-48F2-46A7-AACA-D74A10C4C13D&un=11b7dc7b2f1a39340ece3c7aa15a835b&share_crt_v=1&sp_tk=77%20lTjNDeGJGQzQ5VzXvv6U=&cpp=1&shareurl=true&spm=a313p.22.28z.1013584011443&short_name=h.3w5o784&cv=N3CxbFC49W5&sm=4dff6f&app=firefox"
url2="https://www.tmall.com/?spm=a220o.1000855.a2226mz.1.25035819mQyNuy"
class b (threading.Thread):
def __init__(self,jubing):
threading.Thread.__init__(self)
self.t=jubing
def run(self):
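# Keep reloading the product page and click the add-to-cart button as soon as it appears; self.t is the polling interval in seconds.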
go = baibaoxiang.geturl(url2)
go.Ctext("请登录", "请登录", "进入登录页面", "无法进入登录页面")
go.llq.maximize_window()
ss = 0
while ss < 20:
if go.llq.current_url == url2:
break
time.sleep(1)
go.llq.get(url)
while ss < 1000:
# print("第",self.jubing,"个进程开始")
try:
jiaru = go.llq.find_elements_by_id("J_LinkBasket")
for ii in jiaru:
print(ii.text)
if "加入购物车" in ii.text:
ii.click()
print("指定商品加入购物车成功")
# break
email("已抢购成功", "plain", "")
except:
go.llq.get(url)
time.sleep(self.t)
go.llq.get(url)
b(1).start()
b(0.3).start()
| UTF-8 | Python | false | false | 1,643 | py | 328 | tianmao2.py | 313 | 0.564881 | 0.464816 | 0 | 52 | 28.75 | 385 |
huningfei/python | 10,874,857,233,181 | 81885abaae7000dc8e548c60d8cc707b7858f1f2 | f72fa4432e6abb742cbf1c61c580db1ed688a311 | /day26/scrapy框架/first/first/spiders/qiushi.py | 6e9761d030b4d629e9e6b3181a1653a3a45b9616 | [] | no_license | https://github.com/huningfei/python | 7ddc9da14a3e53ad1c98fc48edd1697a6f8fc4f7 | 9ca1f57f2ef5d77e3bb52d70ac9a241b8cde54d2 | refs/heads/master | 2022-10-31T18:56:33.894302 | 2019-01-04T11:06:59 | 2019-01-04T11:06:59 | 128,178,516 | 2 | 1 | null | false | 2022-10-12T19:26:04 | 2018-04-05T08:25:32 | 2021-02-19T03:39:54 | 2019-01-05T06:20:50 | 73,572 | 1 | 1 | 1 | Python | false | false | # -*- coding: utf-8 -*-
import scrapy
class QiushiSpider(scrapy.Spider):
name = 'qiushi'
# allowed_domains = ['www.aa.com']
start_urls = ['https://www.qiushibaike.com/']
def parse(self, response):
# xpath is a method of response; xpath expressions can be applied directly through it
odiv = response.xpath('//div[@id="content-left"]/div')
content_list = [] # used to store the parsed data
for div in odiv:
# The xpath function returns a list of Selector objects. The parsed content is wrapped inside each Selector, so extract() must be called to pull the content out of the Selector.
author = div.xpath('.//div[@class="author clearfix"]/a/h2/text()')[0].extract()
content = div.xpath('.//div[@class="content"]/span/text()').extract()
print(content)
# Wrap the parsed content in a dict
dic = {
'作者': author,
'内容': content
}
# Store the data in the content_list list
content_list.append(dic)
return content_list
# To run: open cmd and cd into the project you created,
# then run with a specified output format for storage: the scraped data is written to files in different formats
# scrapy crawl qiubai -o qiubai.json
# scrapy crawl qiubai -o qiubai.xml
# scrapy crawl qiubai -o qiubai.csv
# # Persistence options:
# # 1. Terminal-command based: the parse method must return an iterable object; content_list is such an iterable
# # 2. Pipeline based
waveproject0/nxtgen-backend | 6,279,242,206,852 | ebe79c092aef6873bb069ae20b93e81073d71968 | 46bbdf98aefe92a7ec1dbbeaeba1b74d76a64b33 | /announcement/models.py | f7f586d65f297a4d9317bba8efe9b083d7ad396e | [] | no_license | https://github.com/waveproject0/nxtgen-backend | bb40794f03b4c00a0e7bffb492530d3487c6d085 | 33d37018ae2b4e29ddc3d4dcd1291a342e2553ce | refs/heads/master | 2023-04-16T07:25:01.814422 | 2021-04-30T13:00:01 | 2021-04-30T13:00:01 | 363,143,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import timedelta, date
from django.db import models
from django.db.models.signals import post_save
from post.models import Post
from institution.models import Institution
from department.models import Department
from Class.models import Class, ClassSectionRelation, ClassSubjectTeacherRelation, AdditionalSubjectTeacherRelation
from commentExplanation.models import Comment
class Announcement(models.Model):
STATUS_CHOICE = (
('draft', 'Draft'),
('active','Active'),
('archive','Archived'),
)
post = models.OneToOneField(Post, on_delete=models.CASCADE)
status = models.CharField(max_length=20,choices=STATUS_CHOICE, default='draft')
block_comment = models.BooleanField(default=False)
archive_date = models.DateField(auto_now=False, auto_now_add=False,null=True, blank=True)
publish = models.DateTimeField(auto_now=False, auto_now_add=False ,null=True, blank=True)
def __str__(self):
return self.post.title +" / "+ self.status
# announcement creation signal
def create_announcement(sender, **kwargs):
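# Runs on every Post save: when the post is flagged as an announcement, create the Announcement, activate it if requested, and, when no archive date is supplied, default it to publish + 10 days.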
if kwargs['created']:
if getattr(kwargs['instance'], '_post_for') == 'announcement':
announcement_obj = Announcement(post=kwargs['instance'])
if hasattr(kwargs['instance'], '_status'):
if getattr(kwargs['instance'], '_status') == 'active':
announcement_obj.status = kwargs['instance']._status
announcement_obj.publish = date.today()
if hasattr(kwargs['instance'], '_block_comment'):
announcement_obj.block_comment = kwargs['instance']._block_comment
if hasattr(kwargs['instance'], '_archive_date'):
announcement_obj.archive_date = kwargs['instance']._archive_date
else:
announcement_obj.archive_date = announcement_obj.publish + timedelta(days=10)
announcement_obj._authority = kwargs['instance']._authority
announcement_obj._authority_model_obj = kwargs['instance']._authority_model_obj
announcement_obj.save()
post_save.connect(create_announcement, sender=Post, weak=False)
#signal ends
class CommentAnnouncementRelation(models.Model):
comment = models.OneToOneField(Comment, on_delete=models.CASCADE)
announcement = models.ForeignKey('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.comment.body +" / "+ self.announcement.post.title
def create_comment_announcement(sender, **kwargs):
if kwargs['created']:
if getattr(kwargs['instance'], '_post_for') == 'announcement':
CommentAnnouncementRelation.objects.create(
comment=kwargs['instance'],announcement=kwargs['instance']._post_model_obj
)
post_save.connect(create_comment_announcement, sender=Comment, weak=False)
class InstitutionAnnouncementRelation(models.Model):
institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.institution.name +" / "+ self.announcement.post.title
class DepartmentAnnouncementRelation(models.Model):
department = models.ForeignKey(Department, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.department.name +" / "+ self.announcement.post.title
class ClassAnnouncementRelation(models.Model):
Class = models.ForeignKey(Class, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.Class.course.name +" / "+ self.announcement.post.title
class SectionAnnouncementRelation(models.Model):
section = models.ForeignKey(ClassSectionRelation, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.section.section +" / "+ self.announcement.post.title
class SubjectTeacherAnnouncementRelation(models.Model):
section_subject = models.ForeignKey(ClassSubjectTeacherRelation, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.section_subject.subject.subject.name +" / "+ self.announcement.post.title
class AdditionalSubjectTeacherAnnouncementRelation(models.Model):
section_additional_subject = models.ForeignKey(AdditionalSubjectTeacherRelation, on_delete=models.CASCADE)
announcement = models.OneToOneField('Announcement', on_delete=models.CASCADE)
def __str__(self):
return self.section_additional_subject.subject.subject.name +" / "+ self.announcement.post.title
# announcement relation creation signal
def create_announcement_relation(sender, **kwargs):
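# Runs when an Announcement is created: link it to the institution, department, class, section or subject relation that matches the posting authority.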
if kwargs['created']:
if getattr(kwargs['instance'], '_authority') == 'adminUser':
InstitutionAnnouncementRelation.objects.create(
institution=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
if getattr(kwargs['instance'], '_authority') == 'hod':
DepartmentAnnouncementRelation.objects.create(
department=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
if getattr(kwargs['instance'], '_authority') == 'classTeacher':
ClassAnnouncementRelation.objects.create(
Class=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
if getattr(kwargs['instance'], '_authority') == 'sectionTeacher':
SectionAnnouncementRelation.objects.create(
section=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
if getattr(kwargs['instance'], '_authority') == 'sectionSubjectTeacher':
SubjectTeacherAnnouncementRelation.objects.create(
section_subject=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
if getattr(kwargs['instance'], '_authority') == 'sectionAdditionalSubjectTeacher':
AdditionalSubjectTeacherAnnouncementRelation.objects.create(
section_additional_subject=kwargs['instance']._authority_model_obj, announcement=kwargs['instance']
)
post_save.connect(create_announcement_relation, sender=Announcement, weak=False)
#signal ends | UTF-8 | Python | false | false | 6,028 | py | 99 | models.py | 98 | 0.754811 | 0.754147 | 0 | 165 | 35.539394 | 115 |
sportsguys/NOM_backend | 10,737,418,275,054 | 95f3a63c0aab7215ed7e9b50c6362a1a7660385d | 13f3f8db9fd7dd58fa11e745e3d11d04bc836589 | /scraping/player.py | 1024f4ea6450e83383758c7bb17843299f3a0ccd | [] | no_license | https://github.com/sportsguys/NOM_backend | a0028388cfd31a100ec74a374803cb0152362270 | 6ca529bed87ccbd3c4a22ba158993796e79ec66d | refs/heads/main | 2023-04-23T10:28:27.940378 | 2021-05-10T18:39:08 | 2021-05-10T18:39:08 | 335,767,853 | 0 | 0 | null | false | 2021-05-10T18:39:03 | 2021-02-03T22:06:06 | 2021-04-27T08:10:02 | 2021-04-27T08:21:23 | 2,146 | 0 | 0 | 1 | Python | false | false | from scraping.player_seasons import QB, WR, RB, Defense, Kicker, PlayerSeason
from scraping.Page import Page
from db.models import player
class Player(Page, player):
def __init__(self, name:str, url:str, position:str):
self.name = name
self.position = position
self.url = url
def get_seasons(self):
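# Load the player's stats page and build a PlayerSeason plus a position-specific stat line for every table row, skipping seasons before 2000.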
self.load_page('https://www.pro-football-reference.com' + self.url)
table_rows = self.bs.select_one('table tbody').contents
seasons = []
statlines = []
for row in table_rows:
if row == '\n':
continue
try:
stats = player_season_table_switch[self.position](self.id)
season = PlayerSeason(self.id)
except KeyError as e:
print(e, 'position not recognized')
continue
season.ping(row)
stats.ping(row)
if int(season.year_id) < 2000:
continue
seasons.append(season)
statlines.append(stats)
return seasons, statlines
# module level variable only needs to be initialized once
player_season_table_switch = {
'QB': QB,
'WR': WR,
'TE': WR,
'RB': RB,
'CB': Defense,
'DB': Defense,
'OLB': Defense,
'LB': Defense,
'DL': Defense,
'DE': Defense,
'K' : Kicker,
'HB': RB,
'FB': RB,
'DT': Defense,
'ILB': Defense,
'EDGE': Defense
}
| UTF-8 | Python | false | false | 1,453 | py | 26 | player.py | 23 | 0.545079 | 0.542326 | 0 | 51 | 27.490196 | 77 |
chunshan160/python | 9,612,136,843,735 | e9498a74591f8acc6ad7bd0701a73140d5ed843a | 5b1a02001c0ba0042d0ed6650003cb8f8ed99fe5 | /home_work/learn/小说爬虫.py | 0cdb60e27266a314d79b30753575d08152412413 | [] | no_license | https://github.com/chunshan160/python | 6d2fdc96fffcbae7ace9590b0fd1a14ff454129d | e8dbebb307b74bf13ec7447a341329fc2a4a0914 | refs/heads/master | 2021-04-01T05:48:24.124326 | 2021-01-07T07:07:40 | 2021-01-07T07:07:40 | 229,753,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time :2020/7/8 15:34
# @Author :春衫
# @File :猫眼爬虫.py
import re
import requests
from bs4 import BeautifulSoup
'''
If you want to implement this script
you need the help of some third-party tools:
a networking/crawling tool that fetches and returns the data,
plus data-filtering packages:
re regular expressions
bs4 web page selector (pip install bs4)
xpath
pyquery
We then save our data in the JSON data format
(json for the front end / html)
import requests
from bs4 import BeautifulSoup
import json
How the crawler runs:
simulate a browser sending an http request (get(), post()) to the server [the site backend that serves the data and accepts http requests]
after receiving the request, the server returns data to the crawler
Code outline:
1. Use code to open the book detail page and return all the data on the detail page
2. Once the detail page data is fetched successfully, filter the data
3. Put the filtered data into files (file operations)
Filtering the data:
1. Extract the names of all novel chapters
2. Extract the links from the <a> tags of all chapters and concatenate them onto the main domain
3. Use requests to request the concatenated URLs
4. Filter the data once the content page data is fetched
'''
def get_response(url):
try:
response = requests.get(url)
except:
print("请求异常")
return None
else:
response.encoding = 'utf-8'
return response.text
def getbook(url, file_path, mode=True):
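# mode=True appends every chapter to a single <book name>.txt file; mode=False writes each chapter to its own file.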
# Response text
response =get_response(url)
# Instantiate the web page selector (parser)
soup = BeautifulSoup(response, 'lxml')
# Book title
book_name=soup.find('h1').text
print(book_name)
# All chapters
data_list = soup.find('ul')
# Each individual chapter
for book in data_list.find_all('a'):
# Build the chapter URL
book_url = url + book["href"]
# Fetch the chapter page text
data_book = get_response(book_url)
# Instantiate the web page selector (parser)
soup = BeautifulSoup(data_book, 'lxml')
# Chapter name
chapter_name = soup.find('h1').get_text()
print(chapter_name,"爬取完成")
# Chapter content
data = soup.find('div', {'id': 'htmlContent'}).text
# File operations
# Write all chapters into a single file
if mode == True:
with open(file_path+book_name+'.txt', 'a+', encoding='utf-8') as f:
f.write(chapter_name + '\n' + '\n' + data + '\n' + '\n')
# Write each chapter to its own separate file
else:
file_name = re.sub('[\/:*?"<>|]', '-', book.text) # strip illegal characters from the filename
with open(file_path+file_name+'.txt', 'w', encoding='utf-8') as f:
f.write(data)
if __name__ == '__main__':
url = 'http://www.biquw.com/book/5336/'
file_path = 'E:\\新建文件夹\\凡人修仙传仙界篇\\'
getbook(url, file_path, mode=True)
| UTF-8 | Python | false | false | 2,772 | py | 386 | 小说爬虫.py | 356 | 0.591351 | 0.576288 | 0 | 95 | 20.663158 | 79 |
nmsinger/NSL-CTF-Team | 4,922,032,568,716 | 24be9b694ec5b688451ab2bee108585a2f777c0a | 2e9f6daf1ed6d219aa305ab3509cff312be1e19c | /2021, Hack a Sat/HAS-Qualifier-Challenges_2020/rbs_m2/generator/signal_generator.py | 6ea1c1c6f0ee7ab9cb2c6f924aa7ee078ab8baa5 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-4.0",
"MIT"
] | permissive | https://github.com/nmsinger/NSL-CTF-Team | 405d98b86e1fe6fc3eafd2eb986f336a1311b5e2 | 243155d9de0f1f1eb3029ccecb6fdfaabc35e0de | refs/heads/main | 2023-08-28T15:14:26.951578 | 2021-09-12T18:50:07 | 2021-09-12T18:50:07 | 378,773,030 | 2 | 0 | MIT | false | 2021-06-21T01:30:40 | 2021-06-21T01:21:08 | 2021-06-21T01:21:11 | 2021-06-21T01:30:40 | 0 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: pwm_side_channel_sim
# GNU Radio version: 3.8.1.0
import datetime
import struct
import time
from distutils.version import StrictVersion
from antenna_control import AntennaController
from orbit import SatelliteObserver
from randomize_challenge import RandomChallengeFactory
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print("Warning: failed to XInitThreads()", file=sys.stderr)
from gnuradio import analog
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
import math
REALTIME_PWM_FREQUENCY = 50
PWM_FREQUENCY_MAX = 4000
MAX_DUTY_PERCENT = 0.35
MIN_DUTY_PERCENT = 0.05
WAIT_SEC = 1
OBSERVATION_TIME = 120
class pwm_side_channel_sim(gr.top_block):
def __init__(self, duty_cycle, pwm_frequency, output_filename, noise_seed):
gr.top_block.__init__(self, "pwm_side_channel_sim")
##################################################
# Variables
##################################################
self.pulse_freq = pulse_freq = pwm_frequency
self.duty_cycle = duty_cycle
self.samp_rate = samp_rate = 2048*pwm_frequency
self.pulse_length = pulse_length = 0.001 / pwm_frequency
self.pulse2_phase_shift = pulse2_phase_shift = -2*math.pi*duty_cycle
self.output_samp_rate = output_samp_rate = 2048 * pwm_frequency
self.output_filename = output_filename
##################################################
# Blocks
##################################################
self.blocks_throttle_0_1_0_0_1 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_1_0_0_0_1_0 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_1_0_0_0_1 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_1_0_0_0_0 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_1_0_0_0 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_1_0_0 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
self.blocks_throttle_0_0 = blocks.throttle(gr.sizeof_float*1, output_samp_rate,True)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_float*1, output_samp_rate,True)
self.blocks_skiphead_0 = blocks.skiphead(gr.sizeof_float * 1, 0)
self.blocks_multiply_xx_1_0 = blocks.multiply_vff(1)
self.blocks_multiply_xx_1 = blocks.multiply_vff(1)
self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
self.blocks_multiply_const_vxx_4_0 = blocks.multiply_const_ff(0.75)
self.blocks_multiply_const_vxx_4 = blocks.multiply_const_ff(0.75)
self.blocks_multiply_const_vxx_3 = blocks.multiply_const_ff(duty_cycle)
self.blocks_multiply_const_vxx_2 = blocks.multiply_const_ff(0.5)
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_ff(-1)
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_ff(36)
self.blocks_head_0 = blocks.head(
gr.sizeof_float * 1,
int(1.25*output_samp_rate*(REALTIME_PWM_FREQUENCY / PWM_FREQUENCY_MAX))
)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_float*1, output_filename, False)
self.blocks_file_sink_0.set_unbuffered(False)
self.blocks_add_xx_1 = blocks.add_vff(1)
self.blocks_add_xx_0 = blocks.add_vff(1)
self.blocks_add_const_vxx_1 = blocks.add_const_ff(1)
self.blocks_add_const_vxx_0 = blocks.add_const_ff(-90)
self.blocks_abs_xx_0_1 = blocks.abs_ff(1)
self.blocks_abs_xx_0_0_1 = blocks.abs_ff(1)
self.blocks_abs_xx_0_0_0 = blocks.abs_ff(1)
self.blocks_abs_xx_0_0 = blocks.abs_ff(1)
self.blocks_abs_xx_0 = blocks.abs_ff(1)
self.analog_sig_source_x_2 = analog.sig_source_f(samp_rate, analog.GR_SQR_WAVE, pulse_freq, 1, 0, 0)
self.analog_sig_source_x_1_0_1 = analog.sig_source_f(samp_rate, analog.GR_SQR_WAVE, pulse_freq, 1, 0, -math.pi*0.0)
self.analog_sig_source_x_1_0_0_0 = analog.sig_source_f(samp_rate, analog.GR_SQR_WAVE, pulse_freq, 1, 0, math.pi*(1-pulse_length*(pulse_freq*0.625)))
self.analog_sig_source_x_1_0_0 = analog.sig_source_f(samp_rate, analog.GR_SQR_WAVE, pulse_freq, 1, 0, pulse2_phase_shift+math.pi*(1-pulse_length*(pulse_freq)))
self.analog_sig_source_x_1_0 = analog.sig_source_f(samp_rate, analog.GR_SQR_WAVE, pulse_freq, 1, 0, pulse2_phase_shift)
self.analog_sig_source_x_0 = analog.sig_source_f(samp_rate, analog.GR_TRI_WAVE, pulse_freq, 1, 0, )
self.analog_noise_source_x_0 = analog.noise_source_f(analog.GR_GAUSSIAN, 0.06, noise_seed)
##################################################
# Connections
##################################################
self.connect((self.analog_noise_source_x_0, 0), (self.blocks_throttle_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_throttle_0_1_0_0_0_1, 0))
self.connect((self.analog_sig_source_x_1_0, 0), (self.blocks_throttle_0_1_0_0, 0))
self.connect((self.analog_sig_source_x_1_0_0, 0), (self.blocks_throttle_0_1_0_0_0, 0))
self.connect((self.analog_sig_source_x_1_0_0_0, 0), (self.blocks_throttle_0_1_0_0_0_0, 0))
self.connect((self.analog_sig_source_x_1_0_1, 0), (self.blocks_throttle_0_1_0_0_1, 0))
self.connect((self.analog_sig_source_x_2, 0), (self.blocks_throttle_0_1_0_0_0_1_0, 0))
self.connect((self.blocks_abs_xx_0, 0), (self.blocks_multiply_xx_1, 0))
self.connect((self.blocks_abs_xx_0_0, 0), (self.blocks_multiply_xx_1, 1))
self.connect((self.blocks_abs_xx_0_0_0, 0), (self.blocks_multiply_xx_1_0, 1))
self.connect((self.blocks_abs_xx_0_0_1, 0), (self.blocks_multiply_const_vxx_2, 0))
self.connect((self.blocks_abs_xx_0_1, 0), (self.blocks_multiply_xx_1_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_add_const_vxx_1, 0), (self.blocks_abs_xx_0_0_1, 0))
self.connect((self.blocks_add_xx_0, 0), (self.blocks_add_xx_1, 3))
self.connect((self.blocks_add_xx_1, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.blocks_add_const_vxx_1, 0))
self.connect((self.blocks_multiply_const_vxx_2, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_3, 0), (self.blocks_add_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_4, 0), (self.blocks_add_xx_1, 1))
self.connect((self.blocks_multiply_const_vxx_4_0, 0), (self.blocks_add_xx_1, 2))
self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_add_xx_0, 1))
self.connect((self.blocks_multiply_xx_1, 0), (self.blocks_multiply_const_vxx_3, 0))
self.connect((self.blocks_multiply_xx_1, 0), (self.blocks_multiply_const_vxx_4_0, 0))
self.connect((self.blocks_multiply_xx_1_0, 0), (self.blocks_multiply_const_vxx_4, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_skiphead_0, 0))
self.connect((self.blocks_head_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.blocks_skiphead_0, 0), (self.blocks_head_0, 0))
self.connect((self.blocks_throttle_0_0, 0), (self.blocks_add_xx_1, 0))
self.connect((self.blocks_throttle_0_1_0_0, 0), (self.blocks_abs_xx_0, 0))
self.connect((self.blocks_throttle_0_1_0_0_0, 0), (self.blocks_abs_xx_0_0, 0))
self.connect((self.blocks_throttle_0_1_0_0_0_0, 0), (self.blocks_abs_xx_0_0_0, 0))
self.connect((self.blocks_throttle_0_1_0_0_0_1, 0), (self.blocks_multiply_const_vxx_1, 0))
self.connect((self.blocks_throttle_0_1_0_0_0_1_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_throttle_0_1_0_0_1, 0), (self.blocks_abs_xx_0_1, 0))
def closeEvent(self, event):
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_duty_cycle(self):
return self.duty_cycle
def set_duty_cycle(self, duty_cycle):
self.duty_cycle = duty_cycle
self.set_pulse2_phase_shift(-2*math.pi*self.duty_cycle)
self.blocks_multiply_const_vxx_3.set_k(self.duty_cycle)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_1_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_1_0_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_1_0_0_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_1_0_1.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_2.set_sampling_freq(self.samp_rate)
self.blocks_throttle_0_1_0_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0_1_0_0_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0_1_0_0_0_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0_1_0_0_0_1.set_sample_rate(self.samp_rate)
self.blocks_throttle_0_1_0_0_0_1_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0_1_0_0_1.set_sample_rate(self.samp_rate)
def get_pulse_length(self):
return self.pulse_length
def set_pulse_length(self, pulse_length):
self.pulse_length = pulse_length
self.analog_sig_source_x_1_0_0.set_phase(self.pulse2_phase_shift+math.pi*(1-self.pulse_length*(self.pulse_freq)))
self.analog_sig_source_x_1_0_0_0.set_phase(math.pi*(1-self.pulse_length*(self.pulse_freq*0.625)))
def get_pulse_freq(self):
return self.pulse_freq
def set_pulse_freq(self, pulse_freq):
self.pulse_freq = pulse_freq
self.analog_sig_source_x_0.set_frequency(self.pulse_freq)
self.analog_sig_source_x_1_0.set_frequency(self.pulse_freq)
self.analog_sig_source_x_1_0_0.set_frequency(self.pulse_freq)
self.analog_sig_source_x_1_0_0.set_phase(self.pulse2_phase_shift+math.pi*(1-self.pulse_length*(self.pulse_freq)))
self.analog_sig_source_x_1_0_0_0.set_frequency(self.pulse_freq)
self.analog_sig_source_x_1_0_0_0.set_phase(math.pi*(1-self.pulse_length*(self.pulse_freq*0.625)))
self.analog_sig_source_x_1_0_1.set_frequency(self.pulse_freq)
self.analog_sig_source_x_2.set_frequency(self.pulse_freq)
def get_pulse2_phase_shift(self):
return self.pulse2_phase_shift
def set_pulse2_phase_shift(self, pulse2_phase_shift):
self.pulse2_phase_shift = pulse2_phase_shift
self.analog_sig_source_x_1_0.set_phase(self.pulse2_phase_shift)
self.analog_sig_source_x_1_0_0.set_phase(self.pulse2_phase_shift+math.pi*(1-self.pulse_length*(self.pulse_freq)))
def get_output_samp_rate(self):
return self.output_samp_rate
def set_output_samp_rate(self, output_samp_rate):
self.output_samp_rate = output_samp_rate
self.blocks_head_0.set_length(self.output_samp_rate)
self.blocks_throttle_0.set_sample_rate(self.output_samp_rate)
self.blocks_throttle_0_0.set_sample_rate(self.output_samp_rate)
def get_output_filename(self):
return self.output_filename
def set_output_filename(self, output_filename):
self.output_filename = output_filename
self.blocks_file_sink_0.open(self.output_filename)
def generate_signal_for_duty_cycle(duty_cycle, signal_time, suffix, noise_seed):
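# Build and run the flowgraph once for this duty cycle, writing the generated samples to a temp file and returning its path.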
out_path = "/tmp/pwm_side_channel_{}.bin".format(suffix)
compressed_signal_time = (REALTIME_PWM_FREQUENCY / PWM_FREQUENCY_MAX * signal_time)
tb = pwm_side_channel_sim(
duty_cycle=duty_cycle,
pwm_frequency=PWM_FREQUENCY_MAX,
output_filename=out_path,
noise_seed=noise_seed
)
tb.run()
return out_path
def trim_signal_to_one_second(file_handle):
"""
Get exactly one second (2048 * 50 samples) from an open generated signal file
:param file_handle:
:return:
"""
PEAK_THRESHOLD = -60.0
# find first peak of current signal, discard earlier data
sample_bytes = file_handle.read(4)
sample = struct.unpack("<f", sample_bytes)[0]
while sample < PEAK_THRESHOLD:
sample_bytes = file_handle.read(4)
sample = struct.unpack("<f", sample_bytes)[0]
yield sample_bytes
for i in range(2048 * 50):
# if this raises an error, so be it
sample_bytes = file_handle.read(4)
yield sample_bytes
def generate_composite_signal(duty_cycle_intervals, noise_seed):
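# Pair the per-second azimuth and elevation signal files and yield the interleaved az/el sample bytes for each second of observation.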
az_duty_cycles, el_duty_cycles = duty_cycle_intervals
az_signal_files = [
generate_signal_for_duty_cycle(dc, 1, f"{noise_seed}_az_{i}", noise_seed)
for i, dc in enumerate(az_duty_cycles)
]
el_signal_files = [
generate_signal_for_duty_cycle(dc, 1, f"{noise_seed}_el_{i}", noise_seed*2)
for i, dc in enumerate(el_duty_cycles)
]
for az_signal_file, el_signal_file in zip(az_signal_files, el_signal_files):
f_az = open(az_signal_file, 'rb')
f_el = open(el_signal_file, 'rb')
for az_sample, el_sample in zip(
trim_signal_to_one_second(f_az),
trim_signal_to_one_second(f_el)
):
yield az_sample + el_sample
f_az.close()
f_el.close()
def make_observations(n_observations, challenge, tle_file, satellite_index, verbose=False):
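# Step through the observation window one second at a time, point the antenna at the satellite while it is above the horizon, and record the normalised az/el duty cycles.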
observations = []
start = int(time.time())
now = start
observer = SatelliteObserver.from_strings(
challenge.groundstation_location.longitude,
challenge.groundstation_location.latitude,
challenge.satellites[satellite_index].name,
tle_file
)
antenna = AntennaController(motion_restrict=False, simulate=True)
for i in range(1, n_observations + 1):
difference = now - start
current_time = challenge.observation_time + difference
altitude, azimuth, distance = observer.altAzDist_at(current_time)
visible = observer.above_horizon(current_time)
if visible:
antenna.set_azimuth(azimuth)
antenna.set_elevation(altitude)
else:
antenna.set_azimuth(0)
antenna.set_elevation(0)
(az_duty, el_duty) = antenna.get_duty_cycles()
if verbose:
print("{}: {} at ({}, {}) from {} ({}) duty cycle: ({}, {})".format(
datetime.datetime.utcfromtimestamp(current_time),
observer.sat_name, azimuth, altitude, observer.where,
"visible" if visible else "not visible",
az_duty, el_duty), file=sys.stderr)
def int_duty_to_percent(duty, servo):
return round(
((float(duty) - servo._min_duty) / servo._duty_range) \
* (MAX_DUTY_PERCENT - MIN_DUTY_PERCENT) + MIN_DUTY_PERCENT,
4
)
obs = (
current_time,
int_duty_to_percent(az_duty, antenna.azimuth_servo),
int_duty_to_percent(el_duty, antenna.elevation_servo),
)
observations.append(obs)
now += WAIT_SEC
return observations
def generate_duty_cycles_for_seed(seed, tle_file, groundstation_file):
rcf = RandomChallengeFactory(tle_file, groundstation_file)
challenge = rcf.create_random_challenge(seed)
sat_duty_cycles = []
for sat_idx in range(3):
observations = make_observations(OBSERVATION_TIME, challenge, tle_file, sat_idx,
verbose=False)
az_duty_cycles = [obs[1] for obs in observations]
el_duty_cycles = [obs[2] for obs in observations]
sat_duty_cycles.append((az_duty_cycles, el_duty_cycles))
return sat_duty_cycles
def generate_signals_for_seed(seed, tle_file, groundstation_file):
sat_duty_cycles = generate_duty_cycles_for_seed(
seed,
tle_file,
groundstation_file
)
for i, (az_duty_cycles, el_duty_cycles) in enumerate(sat_duty_cycles):
print(f"azimuth: {az_duty_cycles[0]:02%} to {az_duty_cycles[-1]:02%}", file=sys.stderr)
print(f"elevation: {el_duty_cycles[0]:02%} to {el_duty_cycles[-1]:02%}", file=sys.stderr)
with open(f"/generator/signal_{i}.bin", 'wb+') as f:
for cycle in generate_composite_signal((az_duty_cycles, el_duty_cycles), seed + i):
f.write(cycle)
if __name__ == '__main__':
generate_signals_for_seed(1337, '/generator/satellites.txt', '/generator/groundstations.csv')
| UTF-8 | Python | false | false | 17,105 | py | 1,808 | signal_generator.py | 427 | 0.63192 | 0.598363 | 0 | 383 | 43.660574 | 167 |
delafields/delafields.github.io | 5,162,550,726,878 | 01b7426a6746d0c82845604a2d957590302ff1fb | ba449bb72bb72916d87a0c23d42c2c5c43959e85 | /movie-budgets/main.py | 6e71bd33cbbb00fa3bd148735d0ee60b6feac262 | [] | no_license | https://github.com/delafields/delafields.github.io | 9fb8fc3812b1acc65b3314e90e88ad4470de1ec5 | f49aaf3d47e77e46406e866b84e2f63c4664c74d | refs/heads/master | 2021-09-24T23:30:31.103203 | 2021-09-21T19:55:00 | 2021-09-21T19:55:00 | 213,717,571 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scrape import get_data
import os
import json
import gzip
# url followed by file name
from urls_to_scrape import urls_n_filenames
def make_json(url, file):
'''Scrape the data and put it into a json file
Parameters:
url (str): a valid url
file (str): the file being saved to
'''
# check if file exists
if os.path.exists(f'./data/{file}.json'):
print(f'./data/{file}.json already exists')
return
# scrape the data into a dict
dictionary = get_data(url)
# put dict into json and save to file
print(f'Data scraped. Saving to ./data/{file}.json')
with gzip.open(f'./data/{file}.json', 'wt', encoding='utf-8') as zipfile:
json.dump(dictionary, zipfile, indent=4)
# worked before. trying to replace with gzip_json
#with open(f'./json_data/{file}.json', 'w') as outfile:
# json.dump(dictionary, outfile, indent=4)
print('Saved!')
# where tha magic happens
for u_n_f in urls_n_filenames:
make_json(u_n_f[0], u_n_f[1])
| UTF-8 | Python | false | false | 1,030 | py | 98 | main.py | 26 | 0.633981 | 0.629126 | 0 | 38 | 26.078947 | 77 |
RyanTLX/cnn-crowd | 14,731,737,869,666 | 1463bb113f1c6577b574966ebd908744ea7a2e4b | a46431f0c1e0bc7f37c93ecc36ec0f0e14eaa642 | /CrowdEstimatorWeb/CrowdEstimatorWeb/views.py | 63d912bc14bd104e0e9da7f1bf0f4579e16eba85 | [] | no_license | https://github.com/RyanTLX/cnn-crowd | cdd74904dcf6545f1cadce167b1453129f8b1860 | 15bedbfb090bfc7e4f0a32d232fd1f7b47c88d67 | refs/heads/master | 2021-05-10T08:21:05.363096 | 2019-02-06T12:26:40 | 2019-02-06T12:26:40 | 118,882,989 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
# import os
# import random
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from numpy import array
import numpy as np
import argparse
import tensorflow as tf
import os
from datetime import datetime, timedelta
from tqdm import tqdm
import random
# Default arguments.
image_height = 64
LR = 0.001
EPOCH = 10
CAM = 'camall'
image_width = None
X_train = None
y_train = None
X_val = None
y_val = None
X_test = None
y_test = None
filename_test = None
labels = None
model = None
model_name = None
model_path = None
def load_dataset():
global image_width, X_train, y_train, X_val, y_val, X_test, y_test, filename_test, labels
# Dataset paths.
training_data_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Dataset', '64_{}_train_dataset.npy'.format(CAM)))
validate_data_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Dataset', '64_{}_validate_dataset.npy'.format(CAM)))
testing_data_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Dataset', '64_{}_test_dataset.npy'.format(CAM)))
# Check if datasets exists
if not os.path.exists(training_data_path):
print('Train dataset not found.')
exit()
if not os.path.exists(validate_data_path):
print('Validate dataset not found.')
exit()
if not os.path.exists(testing_data_path):
print('Test dataset not found.')
exit()
# Load datasets from paths.
train_data = np.load(training_data_path)
validate_data = np.load(validate_data_path)
test_data = np.load(testing_data_path)
image_width = len(train_data[0][0])
X_train = np.array([i[0] for i in train_data]).reshape(-1, int(image_height), int(image_width), 1)
y_train = [i[1] for i in train_data]
X_val = np.array([i[0] for i in validate_data]).reshape(-1, int(image_height), int(image_width), 1)
y_val = [i[1] for i in validate_data]
X_test = np.array([i[0] for i in test_data]).reshape(-1, int(image_height), int(image_width), 1)
y_test = [i[1] for i in test_data]
filename_test = [i[2] for i in test_data]
labels = ['empty', 'low', 'medium', 'high']
def predict_batch():
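# Pick six random test images, run the model on the batch, and map each top prediction to a crowd label and a display colour.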
images = []
image_names = []
# Generate 6 random images.
for i in range(6):
random_image_no = random.randint(0, len(X_test)-1)
image_data = X_test[random_image_no]
image_name = filename_test[random_image_no]
images.append(image_data)
image_names.append(image_name)
# Prediction and results.
results = np.round(model.predict(images), decimals=3)
# Return predictions.
predictions = []
i = 0
for res in results:
top_k = res.argsort()[::-1]
prediction = labels[top_k[0]]
pred_image_name = image_names[i]
i += 1
color = "#D5E8D4"
if prediction == "medium":
color = "#FFF2CC"
elif prediction == "high":
color = "#F8CECC"
predictions.append({'cabin_image_name': pred_image_name, 'cabin_label': prediction, 'cabin_color': color})
return predictions
def reload_data(request):
if type(X_test) is type(None):
index(request) # To initialise all global variables first. Occurs when starting server while page already running.
params = predict_batch()
return JsonResponse({'params': params})
def index(request):
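# Load the dataset, rebuild the CNN graph, load the trained weights from disk and render the main page.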
global LR, EPOCH, CAM, model_name, model_path, model
# Hide TensorFlow deprecated errors.
tf.logging.set_verbosity(tf.logging.ERROR)
load_dataset()
# Convolutional Neural Network
tf.reset_default_graph()
convnet = input_data(shape=[None, int(image_height), int(image_width), 1], name='input')
convnet = max_pool_2d(convnet, 2) # Makes the network run faster. Get most interesting parts first?
convnet = conv_2d(convnet, 16, 7, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 32, 7, activation='relu')
convnet = conv_2d(convnet, 16, 5, activation='relu')
convnet = fully_connected(convnet, 64, activation='relu')
convnet = dropout(convnet, 0.5)
convnet = fully_connected(convnet, 4, activation='softmax') # Because 4 categories
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
# Package network into a model.
model = tflearn.DNN(convnet)
# Initialise model name.
model_name = str(image_height) + '_' + str(LR) + '_' + CAM + '_crowd_model'
model_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Model', model_name))
model.load(model_path)
return render(request, 'CrowdEstimatorWeb/index.html')
| UTF-8 | Python | false | false | 4,924 | py | 7 | views.py | 4 | 0.65069 | 0.636881 | 0 | 150 | 31.826667 | 148 |
cash2one/xai | 14,534,169,342,267 | 7a20a04abed537cc4d8a331fb2654de28dc261a5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_falsehoods.py | 0c0f5989c9ef604505a247e5a552df3d69ecbd4e | [
"MIT"
] | permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.nouns._falsehood import _FALSEHOOD
#calss header
class _FALSEHOODS(_FALSEHOOD, ):
def __init__(self,):
_FALSEHOOD.__init__(self)
self.name = "FALSEHOODS"
self.specie = 'nouns'
self.basic = "falsehood"
self.jsondata = {}
| UTF-8 | Python | false | false | 259 | py | 37,275 | _falsehoods.py | 37,266 | 0.675676 | 0.675676 | 0 | 10 | 24.7 | 58 |
jspeyside/sample_test | 17,703,855,196,136 | 8ddb7f9c6bfa7153883cd855706eafde246b94e6 | 87a32969a4df595f38e613415a4cad810f417eb1 | /tests/unit/lib/test_math.py | 6ac1a4bf28701e2d9af8cb967dcbbf484f6728c7 | [
"MIT"
] | permissive | https://github.com/jspeyside/sample_test | 26b33ed8465a0494ce5e4b575ac6fb2fdb4fcd4e | d8577936ecc936ee1878e091555d2d4daea2983c | refs/heads/master | 2021-06-07T20:56:30.075230 | 2016-09-23T00:09:43 | 2016-09-23T00:09:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from tests.base import BaseTest
from lib.math import Point
class TestPoint(BaseTest):
def test_something(self):
time.sleep(45)
assert 1 == 1
def test_get_distance(self):
d1 = Point.get_distance(5, 0)
d2 = Point.get_distance(3, 10)
assert d1 == 5
assert d2 == 7
def test_distance_to_point(self):
p1 = Point(0, 0)
p2 = Point(0, 5)
assert p1.distance_to_point(p2) == 5
| UTF-8 | Python | false | false | 467 | py | 11 | test_math.py | 7 | 0.5803 | 0.528908 | 0 | 21 | 21.238095 | 44 |
Mofvsv/RenameFile | 3,779,571,229,677 | 16c6055e246b0b89ef4501ab8edb309e922f0a3d | 0b3b92c7dd1102f02a201b181ce683913e685cb3 | /Rename_files.py | 0a18af0231a00e9908c08ba699f16710ab53dd71 | [] | no_license | https://github.com/Mofvsv/RenameFile | a8faf19c3e29be7aafcc71f2cc7ae5624ad646d9 | d29377b105808f90254544bfa9777084f02ea37d | refs/heads/master | 2021-01-20T03:29:21.636272 | 2017-04-27T02:18:55 | 2017-04-27T02:18:55 | 89,545,836 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
def rename_files():
#get file names from a folder
file_list = os.listdir(r"C:\Users\TheHomie\Documents\Udacity\Github\python\Rename_file\prank")
print(file_list)
save_path = os.getcwd()
print("Current working directory" + save_path)
os.chdir(r"C:\Users\TheHomie\Documents\Udacity\Github\python\Rename_file\prank") # I can't figure out how to go into the prank sub directory
print("Current working directory" + save_path)
#for each file, rename filename
for file_name in file_list:
os.rename(file_name, file_name.translate(None, "0123456789"))
os.chdir(save_path)
rename_files()
| UTF-8 | Python | false | false | 638 | py | 1 | Rename_files.py | 1 | 0.705329 | 0.689655 | 0 | 16 | 38.875 | 144 |
DebugProd-inc/restfull | 7,035,156,453,119 | 30c36f4bc6f0c6a843b369b4ecfd30f7c89a8451 | f2fb33ec8901a2251cfb28b728f0bd19851b48bf | /app/api/api_routes/directions/create_direction.py | 016b3bacbc26ae4e684d5aec75dcede9b0fd2f5a | [] | no_license | https://github.com/DebugProd-inc/restfull | 5c876f7ebbe351272b582003cc6b971969104ddb | 1c3c779ebf26f70beea411fd6dd444fab8f7cadd | refs/heads/master | 2022-11-29T14:53:36.171711 | 2020-08-02T07:07:34 | 2020-08-02T07:07:34 | 277,918,870 | 0 | 0 | null | false | 2020-08-02T07:07:35 | 2020-07-07T20:47:04 | 2020-08-02T07:00:25 | 2020-08-02T07:07:34 | 140 | 0 | 0 | 0 | Python | false | false | from flask import (
url_for,
request,
jsonify
)
from app import db
from app.all_models import Direction
from app.api import bp
from app.api.errors import bad_request
from app.api.auth import token_auth
@bp.route('/directions', methods=['POST'])
@token_auth.login_required
def create_direction():
data = request.get_json() or {}
if 'point_of_departure' not in data or \
'point_of_destination' not in data:
return bad_request(
'must include point_of_departure, '
+ 'point_of_destination fields'
)
direction = Direction()
direction.from_dict(data)
db.session.add(direction)
db.session.commit()
response = jsonify(direction.to_dict())
response.status_code = 201
response.headers['Location'] = url_for(
'api.get_direction',
id=direction.id
)
return response
| UTF-8 | Python | false | false | 881 | py | 73 | create_direction.py | 68 | 0.644722 | 0.641317 | 0 | 35 | 24.171429 | 47 |
Aasthaengg/IBMdataset | 11,381,663,352,582 | 55d4206ef6107659606fe2a8f14a49b0b2f1040a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03228/s183306923.py | 0e9e3f52b9cf4339d158ee8512cd2b1f983ca07d | [] | no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a,b,k = map(int,input().split())
cnt=0
while cnt < k:
if cnt%2==0:
if a%2==0:
b+=a//2
a=a//2
cnt+=1
else:
a-=1
cnt+=1
b+=a//2
a=a//2
else:
if b%2==0:
a+=b//2
b=b//2
cnt+=1
else:
b-=1
cnt+=1
a+=b//2
b=b//2
print(a,b)
| UTF-8 | Python | false | false | 425 | py | 202,060 | s183306923.py | 202,055 | 0.263529 | 0.214118 | 0 | 24 | 16.666667 | 32 |
mcuttler/data-services | 910,533,098,067 | ef89004432887ef346c44f64086d88aa2bbe6528 | a02b0f048884ae8c1ab6ea83b520caf34bcb5704 | /AODN/AODN-WAVE-DM/DOT-WA-WAVE/awac_library/current_parser.py | d43e0dbdcaf47fc04a2433a985a7770519f11aa5 | [] | no_license | https://github.com/mcuttler/data-services | 6ca7216efd066f1032635bc9cc6766567c5faa41 | 9072a6f5b9054287870272c1c623cf97c9411923 | refs/heads/master | 2022-12-11T15:54:56.068216 | 2020-09-08T05:19:43 | 2020-09-08T05:19:43 | 294,862,558 | 0 | 0 | null | true | 2020-09-12T03:31:13 | 2020-09-12T03:31:12 | 2020-09-08T05:19:46 | 2020-09-09T07:09:48 | 57,222 | 0 | 0 | 0 | null | false | false | """
current_data_parser -> parse current data into a pandas df
gen_nc_current_deployment -> generate a NetCDF from a current file
"""
import datetime
import glob
import logging
import os
import re
import shutil
import tempfile
import numpy as np
import pandas as pd
from netCDF4 import Dataset, date2num
from common_awac import param_mapping_parser, NC_ATT_CONFIG, set_glob_attr, set_var_attr
from generate_netcdf_att import generate_netcdf_att
logger = logging.getLogger(__name__)
CURRENT_PARAMETER_MAPPING = os.path.join(os.path.dirname(__file__), 'current_parameters_mapping.csv')
CURRENT_COMMENT = """
Two Current text files are produced.
The Current text file names are a combination of the Location Name and the Deployment Name selected via the "Process Nortek WPR File" menu option together with either
Currents Bottom Up, or,
Currents Surface Down,
The current cells in the "Currents Bottom Up" text file are listed with the seabed as the datum and would be appropriate to seabed sediment drift investigations.
The current cells in the "Currents Surface Down" text file are listed with the sea surface as the datum and would be appropriate to surface drift investigations.
NetCDF files are generated using the "Bottom Up" files.
"""
def current_data_parser(filepath):
"""
parser of current data file
:param filepath: txt file path of AWAC tide data
:return: pandas dataframe of data, pandas dataframe of data metadata
"""
# parse current file and merge into datetime object Date and Time columns
df = pd.read_table(filepath, sep=r"\s*",
skiprows=15, parse_dates={'datetime': ['Date', 'Time']},
date_parser=lambda x:pd.datetime.strptime(x, '%d/%m/%Y %H:%M'),
engine='python')
# rename column
df.rename(columns={"m": "water_height"}, inplace=True)
df.rename(columns={"Vel": "Vel_average"}, inplace=True)
df.rename(columns={"Dir": "Dir_average"}, inplace=True)
# substract 8 hours from timezone to be in UTC
df['datetime'] = df['datetime'].dt.tz_localize(None).astype('O').values - datetime.timedelta(hours=8)
# retrieve metadata info
location = pd.read_csv(filepath, sep=r":", skiprows=4, nrows=1, header=None).values[0][1].strip()
n_cells = pd.read_csv(filepath, sep=r":", skiprows=7, nrows=1, header=None).values[0][1]
cell_size = pd.read_csv(filepath, sep=r":", skiprows=8, nrows=1, header=None).values[0][1].strip()
blanking_distance = pd.read_csv(filepath, sep=r":", skiprows=9, nrows=1, header=None).values[0][1].strip()
return df, {'deployment': location,
'number_of_cells': n_cells,
'cell_size': cell_size,
'blanking_distance': blanking_distance}
def gen_nc_current_deployment(deployment_path, metadata, site_info, output_path='/tmp'):
"""
generate a FV01 NetCDF file of current data.
:param deployment_path: the path to a tidal deployment (as defined in metadata txt file)
:param metadata: metadata output from metadata_parser function
:param output_path: NetCDF file output path
:return: output file path
"""
current_folder_path = os.path.join(deployment_path, "CURRENT")
data_current_file_ls = glob.glob('{current_folder_path}/*Bottom Up.txt'.format(
current_folder_path=current_folder_path))
missing_file_warn_str = 'No CURRENT data files available in {path}'.format(path=deployment_path)
if not data_current_file_ls:
logger.warning(missing_file_warn_str)
return None
current_data, current_metadata = current_data_parser(data_current_file_ls[0]) # only one file
var_mapping = param_mapping_parser(CURRENT_PARAMETER_MAPPING)
deployment_code = os.path.basename(deployment_path.split(' ')[0])
metadata[1]['deployment_code'] = deployment_code
site_code = metadata[1]['site_code']
nc_file_name = 'DOT-WA_ZV_{date_start}_{site_code}_AWAC-CURRENT_FV01_END-{date_end}.nc'.format(
date_start=current_data.datetime.dt.strftime('%Y%m%dT%H%M%SZ').values.min(),
site_code=site_code,
date_end=current_data.datetime.dt.strftime('%Y%m%dT%H%M%SZ').values.max()
)
temp_dir = tempfile.mkdtemp()
nc_file_path = os.path.join(temp_dir, nc_file_name)
try:
with Dataset(nc_file_path, 'w', format='NETCDF4') as nc_file_obj:
nc_file_obj.createDimension("TIME", current_data.datetime.shape[0])
nc_file_obj.createVariable("LATITUDE", "d", fill_value=99999.)
nc_file_obj.createVariable("LONGITUDE", "d", fill_value=99999.)
nc_file_obj.createVariable("TIMESERIES", "i")
nc_file_obj["LATITUDE"][:] = metadata[1]['lat_lon'][0]
nc_file_obj["LONGITUDE"][:] = metadata[1]['lat_lon'][1]
nc_file_obj["TIMESERIES"][:] = 1
var_time = nc_file_obj.createVariable("TIME", "d", "TIME")
# add gatts and variable attributes as stored in config files
generate_netcdf_att(nc_file_obj, NC_ATT_CONFIG, conf_file_point_of_truth=True)
time_val_dateobj = date2num(current_data.datetime.astype('O'), var_time.units, var_time.calendar)
var_time[:] = time_val_dateobj
df_varname_ls = list(current_data[current_data.keys()].columns.values)
df_varname_ls.remove("datetime")
current_cell_varname_pattern = re.compile(r"""(?P<varname>Dir|Vel)\.(?P<cell_number>[0-9].*)""")
for df_varname in df_varname_ls:
is_var_current_cell = False
is_var_current_average_cell = False
if current_cell_varname_pattern.match(df_varname):
fields = current_cell_varname_pattern.match(df_varname)
df_varname_mapped_equivalent = fields.group('varname')
mapped_varname = '{varname}_CELL_{cell_number}'.format(
varname=var_mapping.loc[df_varname_mapped_equivalent]['VARNAME'],
cell_number=fields.group('cell_number'))
is_var_current_cell = True
elif df_varname.endswith('_average'):
df_varname_mapped_equivalent = df_varname.split('_')[0]
mapped_varname = '{varname}_AVERAGE'.format(
varname=var_mapping.loc[df_varname_mapped_equivalent]['VARNAME'])
is_var_current_average_cell = True
else:
df_varname_mapped_equivalent = df_varname
mapped_varname = var_mapping.loc[df_varname_mapped_equivalent]['VARNAME']
dtype = current_data[df_varname].values.dtype
if dtype == np.dtype('int64'):
dtype = np.dtype('int16') # short
else:
dtype = np.dtype('f')
nc_file_obj.createVariable(mapped_varname, dtype, "TIME")
set_var_attr(nc_file_obj, var_mapping, mapped_varname, df_varname_mapped_equivalent, dtype)
if not mapped_varname == 'DEPTH':
setattr(nc_file_obj[mapped_varname], 'coordinates', "TIME LATITUDE LONGITUDE DEPTH")
if is_var_current_cell:
setattr(nc_file_obj[mapped_varname], 'cell_order', 'from sea bottom to top')
setattr(nc_file_obj[mapped_varname], 'cell_number', fields.group('cell_number'))
setattr(nc_file_obj[mapped_varname], 'cell_size', current_metadata['cell_size'])
setattr(nc_file_obj[mapped_varname], 'total_number_of_cells', current_metadata['number_of_cells'])
setattr(nc_file_obj[mapped_varname], 'blanking_distance_between_cells', current_metadata['blanking_distance'])
if is_var_current_average_cell:
setattr(nc_file_obj[mapped_varname], 'cell_comment', 'cell at depth average')
nc_file_obj[mapped_varname][:] = current_data[df_varname].values
# global attributes from metadata txt file
setattr(nc_file_obj, 'data_info', CURRENT_COMMENT)
setattr(nc_file_obj, 'number_of_cells', str(current_metadata['number_of_cells']))
setattr(nc_file_obj, 'cell_size', current_metadata['cell_size'])
setattr(nc_file_obj, 'blanking_distance', current_metadata['blanking_distance'])
set_glob_attr(nc_file_obj, current_data, metadata, site_info)
# we do this for pipeline v2
os.chmod(nc_file_path, 0664)
shutil.move(nc_file_path, output_path)
except Exception as err:
logger.error(err)
shutil.rmtree(temp_dir)
return nc_file_path
| UTF-8 | Python | false | false | 8,714 | py | 80 | current_parser.py | 56 | 0.632316 | 0.62543 | 0 | 183 | 46.617486 | 166 |
Denny2020/airflow | 16,870,631,572,862 | e0c13b2065e55a3cdaa1a25b8ac15e18827409e6 | 32df5d7d207a63e2521aaceda555ebfa9eac5266 | /dags/tomtom_data_migration.py | 880220fe3f626fe4a2cc232ff6bf065e37d2753a | [] | no_license | https://github.com/Denny2020/airflow | 7a29d1882284a418426e0969d68fe1a6a202a774 | e3e16effbc311bb7bdb5d410e65248c49b245efd | refs/heads/main | 2023-07-19T06:13:00.236477 | 2021-09-20T13:47:54 | 2021-09-20T13:47:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from pathlib import Path
from model import Connection
import config
# Initialize Tomtom Table
def main(db_connection):
Path(config.CSV_FILE_DIR).mkdir(parents=True, exist_ok=True)
connection = Connection(db_connection)
session = connection.get_session()
session.execute('''CREATE TABLE IF NOT EXISTS tomtom (
timestamp INT PRIMARY KEY,
date_time TIMESTAMP,
traffic_index INT,
jams_count INT,
jams_length DECIMAL,
jams_delay DECIMAL,
traffic_index_weekago INT,
weekday VARCHAR(20))''')
session.commit()
session.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--connection", required=True, type=str)
args = parser.parse_args()
main(args.connection) | UTF-8 | Python | false | false | 791 | py | 9 | tomtom_data_migration.py | 8 | 0.686473 | 0.683944 | 0 | 29 | 26.310345 | 64 |
alishka1/selenium | 14,250,701,498,048 | a743402d67b12506e8c7554e9cd78d72a80c31c1 | 9e209a435ae8bbe6d847fec2c2f7d93424828c1d | /window_handle.py | bdbd070051c63e6b9e7b5e42b63cf5cf472a0671 | [] | no_license | https://github.com/alishka1/selenium | 8aa80d67a4a44ae7c51a90e03d417d059e3260ad | 26c7acf06132b01a0b6df9b087268d67aacbae6e | refs/heads/master | 2021-01-20T00:51:21.097711 | 2017-04-24T12:57:46 | 2017-04-24T12:57:46 | 89,200,288 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
import time
from bs4 import BeautifulSoup
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup as beatsop
import re
# driver = webdriver.Chrome("chromedriver")
driver = webdriver.Firefox()
# driver.set_page_load_timeout(30)
driver.implicitly_wait(10)
driver.get('https://www.facebook.com/login.php?next=https%3A%2F%2Fwww.facebook.com%2Fsharer.php%3Fs%3D100%26p%255Burl%255D%3Dhttps%253A%252F%252Fdiplomtime.com%252Ffriend%252F405729&display=popup')
time.sleep(2)
try:
n = driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')
print(n)
print "ok"
except NameError:
print "no"
time.sleep(2)
try:
element = driver.find_element_by_tag_name('body')
# n = ActionChains(driver).move_to_element(element).key_down(Keys.CONTROL).send_keys('f').key_up(Keys.CONTROL).perform()
n = ActionChains(driver).key_down(Keys.CONTROL).send_keys_to_element(element, 'f').key_up(Keys.CONTROL).perform()
print(n)
print 'ok'
except NameError:
print 'no'
time.sleep(2)
try:
n = driver.find_element_by_tag_name('body').send_keys(u'\ue031')
print(n)
print "ok"
except NameError:
print 'no'
time.sleep(2)
try:
n = driver.find_element_by_tag_name('body').send_keys(F5)
print(n)
print "ok"
except NameError:
print(n)
print 'no'
time.sleep(2)
try:
n = driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'r')
print(n)
print "ok"
except NameError:
print(n)
print 'no'
time.sleep(2)
try:
n = driver.find_element_by_xpath(".//*[@id='u_0_2']").click()
print(n)
print "ok"
except NameError:
print(n)
print 'no'
# ActionChains(driver) \
# .key_down(Keys.CONTROL) \
# .click(element) \
# .key_up(Keys.SHIFT) \
# .perform() \ | UTF-8 | Python | false | false | 2,034 | py | 5 | window_handle.py | 4 | 0.677974 | 0.649459 | 0 | 106 | 18.198113 | 197 |
pericles-tpt/SENG3011-Kool-Kats | 3,186,865,758,721 | d3321fea4ae29e32ab46782584525354074ba219 | 9ed545b6cf3615660d1d1188dd45117509745fcc | /PHASE_1/API_SourceCode/pychromeless/src/lambda_function.py | ad4fc1e0288b8de8ec854a33fe4ba03cba34681a | [
"Apache-2.0"
] | permissive | https://github.com/pericles-tpt/SENG3011-Kool-Kats | fb0e837ac420f0909218eb7106a3ca5d6fc8791b | 625b0fddedbebb47f9714c2ed0168f620ed1f164 | refs/heads/master | 2023-04-13T15:43:57.575788 | 2021-04-25T22:53:00 | 2021-04-25T22:53:00 | 344,713,229 | 0 | 0 | null | false | 2021-04-22T01:09:28 | 2021-03-05T06:13:13 | 2021-04-22T01:07:35 | 2021-04-22T01:09:27 | 54,911 | 0 | 0 | 0 | JavaScript | false | false | import time
import json # STEPHEN: I added this
import datetime
from webdriver_wrapper import WebDriverWrapper
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def lambda_handler():
#driver = webdriver.Chrome('/path/to/chromedriver')
#countries = get_countries()
#diseases = get_diseases()
#1996
articles = get_articles('Australia', 2018, 2020)
#handle_get_articles('2019-06-06 00:00:00', '2019-09-09 00:00:00')
#print('diseases ' + str(diseases))
#print("Space")
#print('countries ' + str(countries))
#print('Articles ' + str(articles))
#get_specific_disease(['Hepatitis', 'Acute diarrhoeal syndrome', 'fish'])
#send_to_sql()
return None
def get_diseases():
driver = WebDriverWrapper()
driver.get_url("https://www.who.int/csr/don/archive/disease/en/")
element_list = driver.find_name('col_2-1_1')
tags = element_list.find_elements_by_tag_name('a')
diseases = []
for items in tags:
if len(items.text) > 1:
diseases.append(items.text)
diseases.append('Legionnaires')
driver.close()
return diseases
def get_countries():
driver = WebDriverWrapper()
driver.get_url("https://www.who.int/csr/don/archive/country/en/")
element_list = driver.find_name('col_2-1_1')
tags = element_list.find_elements_by_tag_name('a')
countries = []
for items in tags:
if len(items.text) > 1 and items.text != "Back to Top":
countries.append(items.text)
driver.close()
return countries
"""
Country: If no country Specified then just return diseases from within a date frame
- If a country is specified then return articles for that country within that frame
Time frame:
Default 2020 - 2020
"""
def get_articles(country = None, date_from = None, date_to = None):
# Get articles relevant to country and date
# if not country or date is chosen then get all articles
# Disease Outbreak News
diseases = get_diseases()
if date_to is None:
date_to = 2020
if date_from is None:
date_from = 2019
else:
date_from = date_from
driver = WebDriverWrapper()
articles = {}
articles[country] = []
article_driver = WebDriverWrapper()
articles_for_sql = []
from_time = date_from
for from_time in range(date_from, date_to+1):
driver.get_url("https://www.who.int/csr/don/archive/year/{}/en/".format(from_time))
print(from_time)
element_list = driver.find_name('col_2-1_1')
tags = element_list.find_elements_by_tag_name('li')
## Search Title for Country if specified
for items in tags:
a_tag = items.find_elements_by_tag_name('a')
country_to_look = items.find_elements_by_tag_name('span')
if len(country_to_look) > 0:
# Check it is the country you are looking for
if country is not None:
print('country ' + str(country_to_look[0].text))
if country.lower() in country_to_look[0].text.lower():
# Look for a link
if a_tag is not None:
## Get name
d_name = ""
for disease_name in diseases:
if disease_name.lower() in country_to_look[0].text.lower():
d_name = disease_name.lower()
article_list = {}
# add the name of the dict as the name of the country
article_list['name'] = d_name
article_list['Articles'] = []
article_list['Headline'] = country_to_look[0].text
article_driver.get_url("https://www.who.int/csr/don/archive/year/{}/en/".format(from_time))
time.sleep(3)
article_driver.click_link(a_tag[0].text)
article_list['date'] = a_tag[0].text
print('date ' + str(a_tag[0].text))
article_driver.get_url("{}".format(a_tag[0].get_attribute('href')))
article_list['url'] = a_tag[0].get_attribute('href')
time.sleep(3)
wrapper = article_driver.find_name_byId('primary')
info = wrapper.find_elements_by_tag_name('span')
# Add article name and Information
for information in info:
article_list['Articles'].append(information.text)
#print(information.text)
articles[country].append(article_list)
else:
country_name = country_to_look[0].text.split("–")
print('country ' + str(country_name))
c_name = ""
if len(country_name) > 1:
articles[country_name[1]] = []
c_name = country_name[1]
else:
articles[country_name[0]] = []
c_name = country_name[0]
## Get the articles within a time frame
if a_tag is not None:
## Get name
d_name = ""
for disease_name in diseases:
if disease_name.lower() in country_to_look[0].text.lower():
d_name = disease_name.lower()
article_list = {}
# add the name of the dict as the name of the country
article_list['name'] = d_name
article_list['Articles'] = []
article_list['Headline'] = country_to_look[0].text
article_driver.get_url("https://www.who.int/csr/don/archive/year/{}/en/".format(from_time))
time.sleep(3)
article_driver.click_link(a_tag[0].text)
article_list['date'] = a_tag[0].text
article_driver.get_url("{}".format(a_tag[0].get_attribute('href')))
article_list['url'] = a_tag[0].get_attribute('href')
time.sleep(3)
wrapper = article_driver.find_name_byId('primary')
info = wrapper.find_elements_by_tag_name('span')
# Add article name and Information
for information in info:
article_list['Articles'].append(information.text)
# print(information.text)
# Get name of Country
articles[c_name].append(article_list)
# Here
for k,v in articles.items():
list_of_items = articles[k]
## Get each result
# print(k)
for occurence in list_of_items:
#print(str(occurence))
date = occurence['date']
date = date.split(" ")
day = date[0]
month = month_string_to_number(date[1])
year = date[2]
url = occurence['url']
cases = get_articles_cases(url) # TODO: Get the disease name and put it in as 2nd argument here
maintxt = occurence['Articles']
now = datetime.datetime(int(year), int(month), int(day), 0, 0, 0)
now.strftime('%Y-%m-%d %H:%M:%S')
maintxt = "\n".join(maintxt)
headline = occurence['Headline']
#flattened = [val for sublist in maintxt for val in sublist]
# TODO: Come back here, I have no idea why this doesn't work...
# Convert k (country) from "TITLE - COUNTRY" to "COUNTRY"
cargs = k.split()
newk = ''
for i in range(len(cargs)):
#print(str(i) + " cargs[i] " + cargs[i] )
if str(cargs[i]) == "-":
# Connect every index after this one into a string separated by spaces
#print('Stopped at cargs[i] is ' + cargs[i])
print(cargs[i+1:])
newk = ' '.join(cargs[i+1:])
break
print('Cargs is ' + str(cargs))
print('Old k is ' + k)
print('newk is ' + newk)
articles_for_sql.append((headline, str(k), str(occurence['name']), now, str(cases), str(url), str(maintxt)))
## Get Link, Save the info in the dict
from_time += 1
send_to_sql(articles_for_sql)
driver.close()
article_driver.close()
return articles
def month_string_to_number(string):
m = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr':4,
'may':5,
'jun':6,
'jul':7,
'aug':8,
'sep':9,
'oct':10,
'nov':11,
'dec':12
}
s = string.strip()[:3].lower()
try:
out = m[s]
return out
except:
raise ValueError('Not a month')
def get_specific_disease(diseases):
# Returns {disease: [{name, cases, article}], totalArticles: int, team:{name:'KoolKats', accessedTime:'', serviceTime:''}
# Go to the articles
driver = WebDriverWrapper()
ret = {}
ret['diseases'] = []
exists = True
for i in diseases:
diseasef = i.lower().replace(' ', '_')
driver.get_url("https://www.who.int/csr/don/archive/disease/{}/en/".format(diseasef))
try:
article_list = driver.find_name('col_2-1_1').find_elements_by_tag_name('li')
except:
exists = False
if exists:
article_list = driver.find_name('col_2-1_1').find_elements_by_tag_name('li')
totalArticles = len(article_list)
cases = 0
name = diseasef
for j in article_list:
article_url = j.find_elements_by_tag_name('a')[0].get_attribute('href')
# Go into article and count case numbers
print(article_url)
cases += get_articles_cases(article_url, i.lower().replace('_', ' '))
ret['diseases'].append({"name": name, "cases": cases, "articles_found_in": totalArticles})
exists = True
#print(json.dumps(ret))
return json.dumps(ret)
# Very basic and slow atm: Looks for word 'cases' and checks if word before it is a number, if so adds it to ccount
# Maybe we could put a limit on the number of articles the user can request for this function?
# e.g. 'cholera' has 280 articles and took upwards of 15 minutes to complete this.
def get_articles_cases(url, disease_name_spaces = None):
driver = WebDriverWrapper()
ccount = 0
driver.get_url(url)
paragraph_list = driver.find_by_id("primary").find_elements_by_tag_name("p")
for i in paragraph_list:
if (len(i.find_elements_by_tag_name("span")) > 0):
p_text = i.find_elements_by_tag_name("span")[0].get_attribute('innerHTML')
words = p_text.split()
n = 'a'
for j in range(len(words)):
stat = words[j-1].replace(',','')
if 'cases' in words[j] and stat.isdigit():
ccount += int(stat)
# break # <-- This mightn't be the best idea but I'm a bit impatient
elif 'cases' in words[j] and disease_name_spaces != None:
found = True
for k in range(0, len(disease_name_spaces)):
if (words[j-k] != disease_name_spaces[len(disease_name_spaces)-1-k]):
found = False
#break
if j-len(disease_name_spaces)-1 >= 0:
print('Index of number should be ' + words[j-len(disease_name_spaces)-1])
found_stat = words[j-len(disease_name_spaces)-1].replace(',','')
if (found == True) and found_stat.isdigit():
ccount += int(found_stat)
#print(ccount)
return ccount
def get_occurance_disease():
print('jsdkjs')
def handle_get_articles(date_start, date_end, country = None, disease = None):
import pymysql
db = pymysql.connect(host="database-restore.cmae6p4l3uws.us-east-1.rds.amazonaws.com",user="admin",db="scrape_db" , password="koolkats", port=3306)
cursor = db.cursor()
# Creates the WHERE part of the query
filters = []
where_query = ''
if country != None:
filters.append('Country=' + country)
elif disease != None:
filters.append('Disease=' + disease)
if len(filters) > 0:
where_query = 'AND '
for i in range(len(filters)):
if i == 0:
where_query += filters[i]
else:
where_query += ' AND ' + filters[i]
try:
query = "SELECT * FROM Articles WHERE Date >= '{}' AND Date <= '{}' {} ORDER BY Date;".format(date_start, date_end, where_query)
cursor.execute(query)
except:
print("Oops we had an error")
response = []
for (Country, Disease, Date, Cases, Url, MainText) in cursor:
response.append({"headline": "test", "url": Url, "location": Country, "reports": ["a", "b", "c"], "termsFound": Disease, "main_text": MainText, "date_of_publication": str(Date)})
ret = json.dumps(response)
print(ret)
def send_to_sql(articles):
import pymysql
# import datetime
# Open database connection
db = pymysql.connect(host="database-1.cmae6p4l3uws.us-east-1.rds.amazonaws.com",user="admin",db="scrape_db" , password="koolkats", port=3306)
# prepare a cursor object using cursor() method
cursor = db.cursor()
print(str(articles))
#query ="INSERT INTO Articles ({}, {}, {}, {});".format(str('IceLand'),str('Cold'),date,int(4)))
#print(now)
## Get data
## Country
## Disease
## Date
## Cases
## Url
query = "INSERT INTO Articles(Headline, Country, Disease, Date, Cases, Url, MainText) VALUES (%s, %s, %s, %s, %s, %s, %s);"
cursor.executemany(query, articles)
## MainText
#query = 'INSERT INTO table_name(column,column_1,column_2,column_3)
#VALUES(%s, %s, %s, %s)'
#csv_data = csv.reader(file('file_name'))
#my_data = []
#for row in csv_data:
# my_data.append(tuple(row))
#cursor.executemany(query, my_data)
#query = "INSERT INTO Articles(Country, Disease, Date, Cases) VALUES (%s, %s, %s, %s);
# execute SQL query using execute() method.
#result = cursor.execute("SELECT * FROM Articles;")
db.commit()
# Fetch a single row using fetchone() method.
# disconnect from server
db.close()
if __name__ == '__main__':
lambda_handler()
| UTF-8 | Python | false | false | 15,404 | py | 24 | lambda_function.py | 17 | 0.524477 | 0.5137 | 0 | 402 | 37.313433 | 186 |
tyedge/holbertonschool-higher_level_programming | 17,446,157,177,584 | 0b8669ca2ab41a2b5bc5f74d8382a8dbc6580cff | 9b3ef471582d4bc2e2a25dba7fdc96fa6153f1f1 | /0x05-python-exceptions/4-list_division.py | c8ca1b50e54f5f8c75a2721979e6c8439409a34c | [] | no_license | https://github.com/tyedge/holbertonschool-higher_level_programming | b9a0f8e117423d507eb3aceb606b6edec30ff481 | 9bbf03d405bec1def3aeeafb9c8ee89c6595d115 | refs/heads/master | 2020-05-17T22:29:19.840477 | 2019-09-27T03:49:21 | 2019-09-27T03:49:21 | 184,002,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
retval = []
num = 0
x = 0
while x in range(list_length):
try:
num = my_list_1[x] / my_list_2[x]
except IndexError:
print("out of range")
except TypeError:
print("wrong type")
except ZeroDivisionError:
print("division by 0")
finally:
retval.append(num)
num = 0
x += 1
return retval
| UTF-8 | Python | false | false | 492 | py | 101 | 4-list_division.py | 95 | 0.504065 | 0.48374 | 0 | 21 | 22.428571 | 53 |
sessionsdev/PythonFundamentals | 15,771,119,946,447 | e674a0b58e377757597ea5466955f4c40fdbeca3 | 18aadcca2b177d009b3487d3a7e718fecb9c4974 | /01_python_fundamentals/01_02_seconds_years.py | 3f15cbbeb5db33e02118f38422ef9ed0c49a86e9 | [] | no_license | https://github.com/sessionsdev/PythonFundamentals | c1ac8c4d6dbf6d6419a4ecd0bed665c57f5835bf | ad0bd0be4c488ce4cd2d97472fe5f1e45df547a1 | refs/heads/master | 2020-06-22T19:16:00.189612 | 2019-09-19T04:05:16 | 2019-09-19T04:05:16 | 197,786,113 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
From the previous example, move your calculation of
how many seconds in a year to a python executable script.
'''
days_in_year = 365
hours_in_day = 24
minutes_in_hours = 60
seconds_in_minutes = 60
seconds_in_year = days_in_year * hours_in_day * minutes_in_hours * seconds_in_minutes
print(seconds_in_year)
| UTF-8 | Python | false | false | 314 | py | 35 | 01_02_seconds_years.py | 35 | 0.729299 | 0.700637 | 0 | 14 | 21.428571 | 85 |
tuipik/ITEA | 4,801,773,442,743 | 7733d7ad4620a14805b7d78e9dea5185e53c5ca5 | 7d01b616624bc6e1788000a7171160bd9333b73d | /Lesson_03/test_homework01.py | d81fad432dff1906854f452f155b20b902f342c0 | [] | no_license | https://github.com/tuipik/ITEA | 37b786a92b04f369d73cca0997155a7a0a638887 | 813f6c05714175e517154db0fa5ba79374079d0c | refs/heads/master | 2020-03-26T20:39:56.474432 | 2018-10-17T16:22:11 | 2018-10-17T16:22:11 | 145,338,050 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import islice
import pytest
from homework01 import call_count, fac, fib, flatten, gcd
@pytest.mark.parametrize('a, b', [
(5, 120),
(7, 5040),
(8, 40320),
(9, 362880)
])
def test_fac(a, b):
assert fac(a) == b
@pytest.mark.parametrize('a, b, c', [
(1, 1, 1),
(2, 3, 1),
(2, 4, 2),
(3, 8, 1),
(6, 9, 3),
(54, 24, 6)
])
def test_gcd(a, b, c):
assert gcd(a, b) == c
def test_fib():
head = islice(fib(), 10)
assert list(head) == [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
@pytest.mark.parametrize('in_seq, out_seq', [
([], []),
([1, 2], [1, 2]),
([1, [2, [3]]], [1, 2, 3]),
([(1, 2), (3, 4)], [1, 2, 3, 4])
])
def test_flatten(in_seq, out_seq):
assert list(flatten(in_seq)) == list(out_seq)
def test_call_count():
@call_count
def test_fn(*args, **kwargs):
return locals()
assert test_fn.call_count == 0
args = (1, 2)
kwargs = {'a': 3}
res = test_fn(*args, **kwargs)
assert res == {'args': args, 'kwargs': kwargs},\
'Декоратор некорректно обрабатывает возвращаемое значение функции'
assert test_fn.call_count == 1
test_fn()
test_fn()
assert test_fn.call_count == 3
with pytest.raises(Exception):
test_fn.call_count = 100
| UTF-8 | Python | false | false | 1,348 | py | 23 | test_homework01.py | 19 | 0.525213 | 0.457719 | 0 | 68 | 17.955882 | 74 |
SearchDataAPI/python-sdk | 9,414,568,334,032 | 70fab8e6add6c75918a7d398c1403e85f5f1b461 | 09f9892da13af4e642095dc809d99dd0f1f29de6 | /searchdata/SearchdataGoogleScholarProfiles.py | 40af5edd1cc9d01fbbb3ef4f1c6b51282c7dfad7 | [
"MIT"
] | permissive | https://github.com/SearchDataAPI/python-sdk | 71b48977f315e2ca43d24cee2c96e8567588f83b | 16afbca6650d1feeab745114782b7d673c1c32a7 | refs/heads/master | 2023-08-22T15:40:56.371089 | 2021-09-15T09:29:27 | 2021-09-15T09:29:27 | 406,332,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from searchdata import SearchdataSDK
class SearchdataGoogleScholarProfiles(SearchdataSDK):
def __init__(self, api_key: str):
super().__init__(api_key)
self.engine = 'google_scholar_profiles'
self.api_url = 'https://api.searchdata.io/v1'
self.is_searchdata_api = True
def set_mauthors(self, value: str):
"""
Set parameter mauthors
:param value: The terms that you are searching for (the query).
"""
self.params['mauthors'] = value
def set_after_author(self, value: str):
"""
Set parameter after_author
:param value: Defines the next page token and must preceed the value of before_author.
"""
self.params['after_author'] = value
def set_before_author(self, value: str):
"""
Set parameter before_author
:param value: Defines the previous page token.
"""
self.params['before_author'] = value
def set_hl(self, value: str):
"""
Set parameter hl
:param value: The language you want to use for your google search.
"""
self.params['hl'] = value
def get_mauthors(self) -> str:
"""
Get parameter mauthors
:return: Returns parameter mauthors
"""
return self.params['mauthors']
def get_after_author(self) -> str:
"""
Get parameter after_author
:return: Returns parameter after_author
"""
return self.params['after_author']
def get_before_author(self) -> str:
"""
Get parameter before_author
:return: Returns parameter before_author
"""
return self.params['before_author']
def get_hl(self) -> str:
"""
Get parameter hl
:return: Returns parameter hl
"""
return self.params['hl'] | UTF-8 | Python | false | false | 1,944 | py | 18 | SearchdataGoogleScholarProfiles.py | 16 | 0.55144 | 0.550926 | 0 | 72 | 25.027778 | 95 |
greenfox-velox/szemannp | 15,264,313,781,817 | f349d4dac044564f103e26bbd95b3976c6526d77 | 692a52c1ab298146fb9bbb648b8e3d0107231db0 | /week-03/day-2/31.py | 3d93ecd4e437d908c48917cc12e35d4dccdc92f1 | [] | no_license | https://github.com/greenfox-velox/szemannp | 8994c39eb13f0b248ef8b00236e3fd69f9b8f42c | 4e2ea37de4a88427c7be6c7b99cc5e6de34d02ec | refs/heads/master | 2021-01-17T13:26:26.172671 | 2016-07-20T11:19:15 | 2016-07-20T11:19:15 | 58,042,984 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | ae = 'Jozsi'
# create a function that greets ae
def greeting_function(name):
return "Greetings, " +name
print(greeting_function(ae))
| UTF-8 | Python | false | false | 139 | py | 164 | 31.py | 130 | 0.71223 | 0.71223 | 0 | 7 | 18.857143 | 34 |
pypeclub/pype | 3,015,067,077,408 | d497451f7f8e34122e6fab76e8c2e3a4894136de | c5f5326a1a959825c478655329fae7118276fba6 | /openpype/hosts/houdini/plugins/publish/validate_camera_rop.py | ca75579267662bfa3859da95ac84b3d91ecb0b4c | [
"MIT"
] | permissive | https://github.com/pypeclub/pype | 9e696e4d461c3cb628e7adf0fa642c7f4c98183d | 13888e5e5d0908cdb06dbac34f63166d33486f2b | refs/heads/main | 2021-06-10T17:15:05.160190 | 2021-06-09T04:03:46 | 2021-06-09T04:03:46 | 175,899,082 | 44 | 21 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pyblish.api
import openpype.api
class ValidateCameraROP(pyblish.api.InstancePlugin):
"""Validate Camera ROP settings."""
order = openpype.api.ValidateContentsOrder
families = ['camera']
hosts = ['houdini']
label = 'Camera ROP'
def process(self, instance):
import hou
node = instance[0]
if node.parm("use_sop_path").eval():
raise RuntimeError("Alembic ROP for Camera export should not be "
"set to 'Use Sop Path'. Please disable.")
# Get the root and objects parameter of the Alembic ROP node
root = node.parm("root").eval()
objects = node.parm("objects").eval()
assert root, "Root parameter must be set on Alembic ROP"
assert root.startswith("/"), "Root parameter must start with slash /"
assert objects, "Objects parameter must be set on Alembic ROP"
assert len(objects.split(" ")) == 1, "Must have only a single object."
# Check if the object exists and is a camera
path = root + "/" + objects
camera = hou.node(path)
if not camera:
raise ValueError("Camera path does not exist: %s" % path)
if not camera.type().name() == "cam":
raise ValueError("Object set in Alembic ROP is not a camera: "
"%s (type: %s)" % (camera, camera.type().name()))
| UTF-8 | Python | false | false | 1,403 | py | 188 | validate_camera_rop.py | 159 | 0.592302 | 0.590877 | 0 | 39 | 34.923077 | 78 |
karlazz/holbertonschool-python-camp | 3,650,722,239,718 | 892c776258bdf9495493bd0952e918fad0bd561c | 0ffed9396091b9a20055b5c7b464b6a2f0fca1aa | /0x02-python_if_else_loops_functions/104-no_c.py | 752456090d481c3b85743188edf0b1b1f5d7652e | [] | no_license | https://github.com/karlazz/holbertonschool-python-camp | 75c4f189aa26372ffa1a9fcc5a1bac817e9d630f | 8f5d71e68fea828d967be2e4520c919689ab1911 | refs/heads/master | 2020-03-19T04:31:23.838926 | 2018-06-03T23:49:23 | 2018-06-03T23:49:23 | 135,838,087 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
def no_c(str):
while str.lower().find('c') > -1:
pos=str.lower().find('c')
str = str[:pos] + str[pos+1:]
return str
| UTF-8 | Python | false | false | 149 | py | 13 | 104-no_c.py | 12 | 0.550336 | 0.530201 | 0 | 6 | 23.5 | 35 |
hikarihust/python | 4,080,218,952,060 | 61e4627143d75af0df6ed0859373dc20f1ad1c36 | b703018a330c9cf8ebe55b5273d4d511f66c7ff6 | /2.Xay-dung-cac-ung-dung-voi-python/1.ung-dung-tra-tu-dien/functions.py | 59b77e0369e8865cabc16ffd5c9a4ca29fd131a7 | [] | no_license | https://github.com/hikarihust/python | cadc89f2deef8a216832baafce7ff7bab40d9672 | e39f317c50959de6c975d87b69d4ef7579c2435e | refs/heads/master | 2023-05-24T19:08:46.770865 | 2023-05-21T16:05:32 | 2023-05-21T16:05:32 | 640,099,775 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from difflib import get_close_matches
def findWord(keyword, data):
result = "Not found"
if keyword in data:
result = showWord(data[keyword])
else:
listSuggest = get_close_matches(keyword, data.keys(), 3)
if(len(listSuggest) > 0) :
for suggest in listSuggest:
confirm = input("Bạn muốn tìm từ: {} (Enter y if yes, or n if no): ".format(suggest) )
if (confirm == "y"):
result = showWord(data[suggest])
break
elif (confirm == "n"):
continue
else :
result = "No valid"
break
return result
def showWord(word):
spelling = word[0]
means = join(word[1:], "\n")
return "Spelling: {spelling} \nMeans:\n{means}".format(
spelling = spelling,
means = means
)
def join(listInput, sep):
result = ''
for el in listInput:
result += '+ {}{}'.format(el, sep)
return result
| UTF-8 | Python | false | false | 1,060 | py | 69 | functions.py | 65 | 0.495726 | 0.491928 | 0 | 36 | 28.138889 | 102 |
froguez/git_tests-1 | 8,074,538,561,720 | 373b3908e32fa65de7cff2db9d7bf03030993117 | 55ba7d8a8d0c825e5aefdf555a343ca162fdd14f | /my_super_app.py | 9d6024d959e1a983a404b68ef7c7bbf1d3afe6b8 | [] | no_license | https://github.com/froguez/git_tests-1 | 563f0df2b38f7098b5578d2cd8ff8e6554e4f1c4 | e27b0a72b63327eeb6e8982ade31389180fdc34a | refs/heads/master | 2022-11-11T23:35:38.007784 | 2020-07-11T08:20:40 | 2020-07-11T08:20:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def say_something():
print("Hello git kraken")
def another_method():
print("this is another method!")
def something_else():
pass
def show_division():
print(1/1)
show_division() | UTF-8 | Python | false | false | 196 | py | 1 | my_super_app.py | 1 | 0.653061 | 0.642857 | 0 | 13 | 14.153846 | 36 |
SOFIA-USRA/sofia_redux | 14,216,341,794,653 | 63f41e68b43116e58bbe4c543ade6dcfe5b7b318 | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/pipeline/sofia/parameters/forcast_parameters.py | f15edb731ec31c61e19b570920c634ba766e4fd6 | [
"BSD-3-Clause"
] | permissive | https://github.com/SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""FORCAST parameter sets."""
from copy import deepcopy
from astropy.io import fits
from sofia_redux.pipeline.parameters import Parameters
__all__ = ['FORCASTParameters']
# Store default values for all parameters here.
# They could equivalently be read from a file, or
# constructed programmatically. All keys are optional;
# defaults are specified in the ParameterSet object.
# All 'key' values should be unique.
DEFAULT = {
'checkhead': [
{'key': 'abort',
'name': 'Abort reduction for invalid headers',
'value': True,
'description': 'If set, the reduction will be '
'aborted if the input headers '
'do not meet requirements',
'dtype': 'bool',
'wtype': 'check_box'}
],
'clean': [
{'key': 'save',
'name': 'Save output',
'value': False,
'description': 'Save output data to disk',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'badfile',
'name': 'Bad pixel map',
'value': '',
'description': 'FITS file containing bad pixel locations',
'dtype': 'str',
'wtype': 'pick_file'},
{'key': 'autoshift',
'name': 'Automatically detect readout shift',
'value': True,
'description': 'If set, data will be checked and '
'corrected for a 16-pixel readout shift.',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'shiftfile',
'name': 'Image number to shift (if not auto)',
'value': '',
'description': 'Specify "all", or semicolon-separated '
'image numbers, starting with 1. \nFor '
'example, to shift the 1st and 3rd image, '
'specify "1;3".',
'dtype': 'str',
'wtype': 'text_box'},
{'key': 'interpolate',
'name': 'Interpolate over bad pixels',
'value': False,
'description': 'If set, bad pixels will be interpolated over.\n'
'If not set, they will be propagated as NaN.',
'dtype': 'bool',
'wtype': 'check_box'},
],
'droop': [
{'key': 'save',
'name': 'Save output',
'value': False,
'description': 'Save output data to disk',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'fracdroop',
'name': 'Droop fraction',
'value': '',
'description': 'Numerical factor for droop correction amplitude.',
'dtype': 'float',
'wtype': 'text_box'},
],
'nonlin': [
{'key': 'save',
'name': 'Save output',
'value': False,
'description': 'Save output data to disk',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'secctr',
'name': 'Background section center',
'value': '',
'description': "Specify the center point in integers as 'x,y'.",
'dtype': 'str',
'wtype': 'text_box'},
{'key': 'secsize',
'name': 'Background section size',
'value': '',
'description': "Specify in integers as 'size_x,size_y'.",
'dtype': 'str',
'wtype': 'text_box'},
],
'stack': [
{'key': 'save',
'name': 'Save output',
'value': True,
'description': 'Save output data to disk',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'add_frames',
'name': "Add all frames instead of subtracting",
'value': False,
'description': 'Generates a sky image, for diagnostic purposes.',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'jbclean',
'name': "Apply 'jailbar' correction",
'value': True,
'description': 'If set, the jailbar pattern will be '
'removed after stacking.',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'bgscale',
'name': 'Scale frames to common level',
'value': False,
'description': 'If set, a multiplicative scaling will be applied.',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'bgsub',
'name': 'Subtract residual background',
'value': False,
'description': 'If set, an additive background level '
'will be removed.',
'dtype': 'bool',
'wtype': 'check_box'},
{'key': 'secctr',
'name': 'Background section center',
'value': '',
'description': "Specify the center point in integers as 'x,y'.",
'dtype': 'str',
'wtype': 'text_box'},
{'key': 'secsize',
'name': 'Background section size',
'value': '',
'description': "Specify in integers as 'size_x,size_y'.",
'dtype': 'str',
'wtype': 'text_box'},
{'key': 'bgstat',
'name': 'Residual background statistic',
'wtype': 'combo_box',
'options': ['median', 'mode'],
'option_index': 0,
'description': 'Select the statistic to use to calculate '
'the residual background.'},
],
}
class FORCASTParameters(Parameters):
"""Reduction parameters for the FORCAST pipeline."""
def __init__(self, default=None,
drip_cal_config=None, drip_config=None):
"""
Initialize parameters with default values.
The various config files are used to override certain
parameter defaults for particular observation modes,
or dates, etc.
Parameters
----------
drip_cal_config : dict-like, optional
Reduction mode and auxiliary file configuration mapping,
as returned from the sofia_redux.instruments.forcast
`getcalpath` function.
drip_config : dict-like, optional
DRIP configuration, as loaded by the
sofia_redux.instruments.forcast `configuration` function.
"""
if default is None: # pragma: no cover
default = DEFAULT
super().__init__(default=default)
self.drip_cal_config = drip_cal_config
self.drip_config = drip_config
def copy(self):
"""
Return a copy of the parameters.
Overrides default copy to add in config attributes.
Returns
-------
Parameters
"""
new = super().copy()
new.drip_cal_config = deepcopy(self.drip_cal_config)
new.drip_config = deepcopy(self.drip_config)
return new
def clean(self, step_index):
"""
Modify parameters for the clean step.
Sets default badfile, using `drip_cal_config`.
Parameters
----------
step_index : int
Reduction recipe index for the step.
"""
if (self.drip_cal_config is not None
and 'badfile' in self.drip_cal_config):
self.current[step_index].set_value(
'badfile', self.drip_cal_config['badfile'])
def droop(self, step_index):
"""
Modify parameters for the droop step.
Sets default droop fraction (fracdroop), using
`drip_config`.
Parameters
----------
step_index : int
Reduction recipe index for the step.
"""
if self.drip_config is not None:
from sofia_redux.instruments.forcast.getpar import getpar
fracdroop = getpar(fits.Header(), 'fracdroop',
dtype=float, default=0.0)
self.current[step_index].set_value('fracdroop', fracdroop)
def nonlin(self, step_index):
"""
Modify parameters for the nonlin step.
Sets default section center and size (secctr, secsize),
using `drip_config`.
Parameters
----------
step_index : int
Reduction recipe index for the step.
"""
# read background section from config file if possible
if self.drip_config is not None:
# assume config has already been
# loaded into sofia_redux.instruments.forcast.configuration
from sofia_redux.instruments.forcast.read_section \
import read_section
# assume image is standard 256x256 size
datasec = read_section(256, 256)
self.current[step_index].set_value(
'secctr', '{:.0f},{:.0f}'.format(datasec[0], datasec[1]))
self.current[step_index].set_value(
'secsize', '{:.0f},{:.0f}'.format(datasec[2], datasec[3]))
def stack(self, step_index):
"""
Modify parameters for the stack step.
Sets default background scaling and subtraction flags
(bgscale, bgsub) and section location (secctr, secsize),
using `drip_config` and `drip_cal_config`.
If the data is grism mode or C2NC2, background
subtraction and scaling are turned off by default. Otherwise,
the default is read from the DRIP config file.
Parameters
----------
step_index : int
Reduction recipe index for the step.
"""
# read background settings from config file
if (self.drip_config is not None
and self.drip_cal_config is not None):
from sofia_redux.instruments.forcast.getpar import getpar
header = fits.Header()
bgscale = getpar(header, 'BGSCALE', dtype=bool, default=False)
bgsub = getpar(header, 'BGSUB', dtype=bool, default=False)
# modify bg params by sky and grism mode
if (self.drip_cal_config['gmode'] != -1
or self.drip_cal_config['cnmode'] in ['C2NC2', 'C2NC4']):
bgsub = 0
bgscale = 0
# set parameter values in current set
self.current[step_index].set_value('bgscale', bgscale)
self.current[step_index].set_value('bgsub', bgsub)
# read section from config, as for nonlin
from sofia_redux.instruments.forcast.read_section \
import read_section
datasec = read_section(256, 256)
self.current[step_index].set_value(
'secctr', '{:.0f},{:.0f}'.format(datasec[0], datasec[1]))
self.current[step_index].set_value(
'secsize', '{:.0f},{:.0f}'.format(datasec[2], datasec[3]))
| UTF-8 | Python | false | false | 10,618 | py | 1,791 | forcast_parameters.py | 1,339 | 0.532869 | 0.527783 | 0 | 302 | 34.15894 | 77 |
Javoh1001/hello | 1,984,274,931,642 | 153356c9df912cd0eeba8baebd00693497620f5a | 33a65e403774531598e0fee1c13fb58c84f3c7d0 | /edu/admin.py | 63f368cc6a02823c4e1c00104e212861c2e1fec8 | [] | no_license | https://github.com/Javoh1001/hello | 8e9fba7a3b4d970020c1c2e491cb208f0e5b302b | 6b255713aa42275808ca6a2705b837edead9d9eb | refs/heads/master | 2023-02-24T00:31:31.635896 | 2021-01-28T02:56:02 | 2021-01-28T02:56:02 | 333,482,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Announcment
admin.site.register(Announcment)
# Register your models here.
| UTF-8 | Python | false | false | 129 | py | 22 | admin.py | 7 | 0.813953 | 0.813953 | 0 | 6 | 20.5 | 32 |
harvastum/PwJS-PiLB | 13,314,398,644,064 | 48c7aaf6dddd91d31bfff9b9b4afa76b18cfe595 | 03c4baa0e35b6f0c9a26a552667e0bb15ddf7bf7 | /2/konwersja_rozszerzenia.py | ba82d7b3a68bbc0b642360eac6f541ce1363ad06 | [] | no_license | https://github.com/harvastum/PwJS-PiLB | dc076279fd7563a7a23348e779c0e6e3433a7800 | f905254c01ee6ca0fb1e051ac5ca70638d31ce81 | refs/heads/master | 2023-02-03T12:04:36.183993 | 2020-12-21T06:23:20 | 2020-12-21T06:23:20 | 323,229,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
path, dirs, files = next(os.walk("dummy_files"))
for f in files:
if f.endswith('.jpg'):
os.rename(path+os.sep+f, path+os.sep+f.rstrip('.jpg')+'.png')
| UTF-8 | Python | false | false | 175 | py | 28 | konwersja_rozszerzenia.py | 19 | 0.605714 | 0.605714 | 0 | 7 | 23.857143 | 69 |
596350754/oldboy_website | 12,790,412,623,212 | 25343c4e93e0343f886acdade06d62c8a4d8b813 | 47cd8b7feb8dcbdd66ea8cde4ec25ffa2f2f644c | /hello.py | cc47191e1fb00a27e9faf371726f01feb7759a43 | [] | no_license | https://github.com/596350754/oldboy_website | 057107d7a63f235d9ebf782943a1d7b7745f1e09 | 268797d4dcb6d34348d9f6c598dab2db4437bdf1 | refs/heads/master | 2020-04-15T20:14:10.252629 | 2019-01-16T03:12:28 | 2019-01-16T03:12:28 | 164,983,855 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | hello
add one line
add two line
add three line
| UTF-8 | Python | false | false | 51 | py | 2 | hello.py | 1 | 0.72549 | 0.72549 | 0 | 4 | 11.75 | 16 |
innocentzyc/get_agentnumber | 2,095,944,049,123 | 114f3c97fbe339aeb5250124cf5b11c6b08516c8 | 53752d49125f88c6afaf04140f48a21b84e4214c | /request.py | c0f1e4f1983f327615cf307fad1877bdac99ccc0 | [] | no_license | https://github.com/innocentzyc/get_agentnumber | ce5704aece06871d1351a19bebf6685f70b745a3 | b7bc2fe7cca8a84267c4b1a572df6394af881fe2 | refs/heads/master | 2020-03-11T23:53:06.702380 | 2019-04-04T10:07:06 | 2019-04-04T10:07:06 | 130,334,583 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import time
from urllib import request
from selenium.webdriver.chrome.webdriver import WebDriver
from bs4 import BeautifulSoup
PAGE = 1
head_list = [
{'User-Agent': 'MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23'},
{'User-Agent': 'Opera/9.20 (Macintosh; Intel Mac OS X; U; en)'},
{'User-Agent': 'Opera/9.0 (Macintosh; PPC Mac OS X; U; en)'},
{'User-Agent': 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)'},
{'User-Agent': 'Mozilla/4.76 [en_jp] (X11; U; SunOS 5.8 sun4u)'},
{'User-Agent': 'iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0'},
{'User-Agent': 'Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)'},
{'User-Agent': 'Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)'},
{'User-Agent': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19'},
{'User-Agent': 'Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/3A101a Safari/419.3'}
]
driver = WebDriver()
def parser(page = 1):
req = request.Request('https://hangzhou.anjuke.com/tycoon/p%s/' % str(page), headers=random.choice(head_list))
response = request.urlopen(req)
soup = BeautifulSoup(response.read(), 'html.parser')
time.sleep(2)
is_last = soup.find(class_='aNxt')
lists = soup.find_all(class_="jjr-itemmod")
for list in lists:
a = list.find_all('a')
href = a[0].get('href')
driver.get(href)
time.sleep(1)
phone = driver.find_element_by_id('broker-nav-phone')
phone.click()
time.sleep(0.5)
number = driver.find_element_by_xpath('//*[@id="broker-nav-phone"]/span').text.replace(' ', '')
name = driver.find_element_by_xpath('/html/body/div[2]/div[2]/div/div[1]/div/a').text.split('的')[0]
with open('item', 'a', encoding='utf-8') as f:
f.write(name)
f.write(":")
f.write(number)
f.write('\n')
if is_last:
page = page + 1
parser(page)
time.sleep(10)
else:
driver.close()
if __name__ =="__main__":
parser()
| UTF-8 | Python | false | false | 2,501 | py | 4 | request.py | 3 | 0.595838 | 0.529812 | 0 | 81 | 29.839506 | 165 |
actsasrob/aws-system-administration-resources | 4,535,485,477,686 | ba0f1c36769e53e70d27e0ed1db764610b001efe | 0edc6b270b8707aa9cf409ebd61ac1e1358fce77 | /ch10/update_route53.py | e74ef729e48a4f07b0cc0b458ab1bffe02ceda3f | [
"MIT"
] | permissive | https://github.com/actsasrob/aws-system-administration-resources | 1b3a8484bb13914770905b793274ae37e3ca091e | 98410d23a358f41e64411681bbd667b6114614da | refs/heads/master | 2021-01-19T01:31:44.896637 | 2017-07-02T21:49:17 | 2017-07-02T21:49:17 | 87,246,867 | 0 | 0 | null | true | 2017-04-05T00:06:19 | 2017-04-05T00:06:19 | 2017-02-03T18:51:25 | 2014-08-26T12:21:51 | 148 | 0 | 0 | 0 | null | null | null | #!/usr/bin/python
import argparse
import boto.route53
from boto.utils import get_instance_metadata
def do_startup():
""" This function is executed when the instance launches. The instances
IP will be added to the master or slave DNS record. If the record
does not exist it will be created.
"""
# Check if the master resource record exists
if zone.get_cname(master_hostname) is None:
print 'Creating master record: %s' % master_hostname
status = zone.add_cname(master_hostname, instance_ip, ttl)
return
print "Master record exists. Assuming slave role"
# Check if the slave resource record exists - if more than one result is found by get_cname,
# an exception is raised. This means that more than one record exists so we can ignore it.
try:
slave_rr_exists = (zone.get_cname(slave_hostname) != None)
except boto.exception.TooManyRecordsException:
slave_rr_exists = True
if slave_rr_exists:
print 'Slave record exists. Adding instance to pool: %s' % slave_hostname
else:
print 'Creating slave record: %s' % slave_hostname
# Create or update the slave Weighted Resource Record Set
status = zone.add_cname(slave_hostname, instance_ip, ttl, slave_identifier)
def do_promote():
master_rr = zone.get_cname(master_hostname)
print 'Updating master record: %s %s' % (master_hostname, instance_ip)
zone.update_cname(master_hostname, instance_ip)
# Remove this instance from the slave CNAME pool by deleting its WRRS
print 'Removing slave CNAME: %s %s' % (slave_hostname, slave_identifier)
zone.delete_cname(slave_hostname, slave_identifier)
parser = argparse.ArgumentParser(description='Update Route 53 master/slave DNS records')
parser.add_argument('action', choices=['startup', 'promote'])
#parser.add_argument('--hosted-zone-id', required=True)
parser.add_argument('--domain', required=True)
parser.add_argument('--cluster-name', required=True)
parser.add_argument('--test')
args = parser.parse_args()
metadata = get_instance_metadata()
instance_ip = metadata['local-ipv4']
instance_id = metadata['instance-id']
ttl = 60 # seconds
master_hostname = 'master-%s.%s' % (args.cluster_name, args.domain)
slave_hostname = 'slave-%s.%s' % (args.cluster_name, args.domain)
# Identifier used for slave Weighted Resource Record Set
slave_identifier = ('slave-%s' % instance_id, 10)
conn = boto.route53.connect_to_region('eu-west-1')
zone = conn.get_zone(args.domain)
if args.action == 'startup':
do_startup()
elif args.action == 'promote':
do_promote() | UTF-8 | Python | false | false | 2,490 | py | 62 | update_route53.py | 21 | 0.738956 | 0.734137 | 0 | 71 | 34.084507 | 93 |
Whatever929/text_based_game | 5,171,140,660,317 | 783b92798eb31fad783a6641d5f045a03ee93451 | d313b6b79502434e5c06ff720b7f18f0bf8f32a8 | /parser_ambiguous_test.py | 181bbf0f8df34a98485f59edd60700de47173aab | [] | no_license | https://github.com/Whatever929/text_based_game | 2ab7623550199a50d43009fcc49e657a9603a823 | 4a48c4090994921c1211fbfe7c946e3a2eb83052 | refs/heads/master | 2022-12-07T14:48:22.123910 | 2020-09-05T15:10:09 | 2020-09-05T15:10:09 | 285,208,430 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # To be decided: If an ambiguous answer is given, output a tuple with an "ambiguous" followed by
# two possible action.
import unittest
import src.parser as p
class AmbiguousParserTest(unittest.TestCase):
pass | UTF-8 | Python | false | false | 215 | py | 22 | parser_ambiguous_test.py | 20 | 0.776744 | 0.776744 | 0 | 8 | 26 | 96 |
yh97yhyh/ProblemSolving | 2,628,520,019,837 | 9a3a51d4e95db7833be7259ef0d138e8e47e0865 | 5070296fcee2b8582daa7520da391b0d8ca6ea24 | /programmers/level1/level1_03.py | e39e186feaded0c82ec5fc4f918492ef0ea8ba34 | [] | no_license | https://github.com/yh97yhyh/ProblemSolving | 4aca790f401c868d598fdeb5a1ce27f034b64cf1 | 48c7bd3e3fef52ddf377c16535fe98ebf908916f | refs/heads/master | 2023-03-17T18:47:15.088551 | 2021-03-07T09:31:02 | 2021-03-07T09:31:02 | 291,486,975 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
< Get the middle character(s) >
'''
s1 = "abcde"
s2 = "qwer"
def solution(s):
length = len(s)
half = int(length / 2)
answer = ''
    if length % 2 == 1: # odd length
answer = s[half]
else:
answer = s[half-1:half+1]
return answer
print(solution(s1))
print(solution(s2)) | UTF-8 | Python | false | false | 311 | py | 110 | level1_03.py | 100 | 0.519031 | 0.487889 | 0 | 20 | 13.5 | 33 |
BobSu628/A-Typical-Game | 8,478,265,484,115 | edc9952d454305d909fea70bdb78ef97895c0816 | f2eb0d158ce4b99a309be06a3bcec5074759b329 | /sprites/spr_txt_lava.py | 233cdccf4fd0ed201c5e9c7f929e6c3d2c820ece | [] | no_license | https://github.com/BobSu628/A-Typical-Game | 008d1a51884c0b89bc8afdd1cea42dacd543df20 | a3e7ed5acffe56896ddd567faf4f83e3904d4885 | refs/heads/master | 2020-03-29T18:20:57.594869 | 2018-09-25T04:20:36 | 2018-09-25T04:20:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sprites.spr_parent import SprPar
class SprTxtLava(SprPar):
def __init__(self, pos):
super().__init__(pos,
["resources/txt_lava.png"])
| UTF-8 | Python | false | false | 178 | py | 34 | spr_txt_lava.py | 34 | 0.550562 | 0.550562 | 0 | 7 | 24.428571 | 52 |
rickpeyton/5-stones-church-rss-feed-parser-lambda | 12,068,858,115,626 | 25e644f8dcdf4f5cace57cee80b8075c6af6a6df | aa3468d0e4b4f55b74856c11654305eea00ce6d1 | /src/parser.py | 3bd2f15b1cfea015d7dc649494a97dff636f7efd | [] | no_license | https://github.com/rickpeyton/5-stones-church-rss-feed-parser-lambda | 7b4157ec365491b5188777e3a5289bda8aea3a7f | 847b1c43cf6ee676111157aafc554e96b108bf05 | refs/heads/master | 2021-01-19T01:45:10.752203 | 2017-12-28T18:14:52 | 2017-12-28T18:14:52 | 84,394,818 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup
import os
import yaml
def lambda_handler(event, context):
response = send_request()
    if False:  # flip to True to (re)write the request fixture to disk
with open(os.path.join(os.path.dirname(__file__), ".", "request_fixture_20171113.yml"), 'w') as f:
f.write(yaml.dump(response))
parsed_xml = BeautifulSoup(response.text, 'lxml-xml')
meta = Meta(parsed_xml)
messages = parse_messages(parsed_xml)
json_response = {
"title": meta.title,
"url": meta.url,
"description": meta.description,
"messages": messages
}
return json_response
def parse_messages(response):
messages = []
counter = 1
for i in response.findAll("item"):
message = Message(i, counter).as_dict()
counter = counter + 1
message_date = datetime.strptime(message["date"], '%b %d, %Y')
        if message_date > datetime(2017, 6, 25):
messages.append(message)
return messages
class Meta(object):
def __init__(self, response):
self.title = response.rss.channel.title.renderContents()
self.url = response.findAll("link")[2].renderContents()
self.description = response.rss.channel.description.renderContents()
class Message(object):
def __init__(self, response, counter):
self.order = counter
self.title = response.title.renderContents()
self.published_date = response.pubDate.renderContents()
self.file = response.link.renderContents()[5:]
self.date = UrlToDate(self.file).date()
self.image = response.find('itunes:image')['href'].encode('ascii')[5:]
def as_dict(self):
return {
"order": self.order,
"title": self.title,
"published_date": self.published_date,
"date": self.date,
"file": self.file,
"image": self.image,
}
class UrlToDate(object):
def __init__(self, url):
self.url = url
def date(self):
date_string = re.search('(?=\/(\d{8}))', self.url).group(1)
date_object = datetime.strptime(date_string, '%Y%m%d')
return date_object.strftime('%b %d, %Y')
def send_request():
try:
feed = requests.get("http://5stoneschurch.libsyn.com/rss")
return feed
except requests.exceptions.RequestException:
print('HTTP Request failed')
| UTF-8 | Python | false | false | 2,466 | py | 9 | parser.py | 2 | 0.607056 | 0.596918 | 0 | 92 | 25.804348 | 106 |
tsushiy/competitive-programming-submissions | 15,607,911,165,411 | 981c42c76e1fea8b6a70ae61b10381e2fc03cea0 | 16d159d6d3fe69d513717caad3e2c21320f93224 | /AtCoder/AGC/agc006/agc006a.py | 151ebcc726a27eadf580ec01615309822ceab310 | [] | no_license | https://github.com/tsushiy/competitive-programming-submissions | d4f068a5157c0de0f1822367e0ca66dd978e43f9 | 9011d855d9252134179cc9cc8f328f6e0ca32407 | refs/heads/master | 2023-04-11T08:34:01.015316 | 2021-04-11T15:16:17 | 2021-04-11T15:17:35 | 175,807,862 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
s = input()
t = input()
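# AGC006 A: the shortest string having s as a prefix and t as a suffix has length
# 2*n minus the longest k for which the last k characters of s equal the first k of t.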
cnt = 0
if s==t:
print(n)
exit()
for i in range(n):
if s[i:]==t[:-i]:
cnt = max(cnt, n-i)
print(2*n-cnt) | UTF-8 | Python | false | false | 155 | py | 769 | agc006a.py | 768 | 0.503226 | 0.490323 | 0 | 11 | 13.181818 | 23 |
devils-ey3/regexSolutionHackerrank | 19,241,453,508,389 | 77303df9f75c77a0b41feb8507f1bb6afa40f52a | da144ebe6e259518e8d8c01f0600667182a647cb | /Introduction/Matching Digits & Non-Digit Characters.py | 3b5460c8c2748d73d42f5145afb83d3fe49390ae | [] | no_license | https://github.com/devils-ey3/regexSolutionHackerrank | 3b9a138c25393572f5de841eb7055170e3002db0 | 4a6f25f76edada07ef85f087783cbb2fab0d036d | refs/heads/master | 2021-05-11T01:19:21.422028 | 2018-05-15T22:34:25 | 2018-05-15T22:34:25 | 118,327,372 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://www.hackerrank.com/challenges/matching-digits-non-digit-character/problem
import re
Regex_Pattern = r"^\d{2}[^\d]\d{2}[^\d]\d{4,}$" # Do not delete 'r'.
| UTF-8 | Python | false | false | 164 | py | 43 | Matching Digits & Non-Digit Characters.py | 42 | 0.670732 | 0.652439 | 0 | 4 | 40 | 83 |
DanielHull/PinAnalysis | 9,947,144,284,875 | 1258f4cf848287f371ce5d3e2373f75b1bc50a9e | 4377da33e37abc7b665be30fa62cb1aaaaf02504 | /Pin_Analysis/test_check_pins.py | a5ee96ca87af0ed45d3998d4c11b70c5df9746cc | [] | no_license | https://github.com/DanielHull/PinAnalysis | 134f8165efa0d11138feefabd7a21f158c63e3da | 4c33343f7dec373ef49a29ce641eb3a443245eda | refs/heads/master | 2020-07-15T17:17:11.047304 | 2019-09-01T01:40:46 | 2019-09-01T01:40:46 | 205,615,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pin_analysis_tools import *
import numpy as np
import sys, os
global sys, os, np
global box_path
box_path = get_box_pathway()
sys.dont_write_bytecode = True
# adds tech dev tool box to path (requirement)
sys.path.insert(0, box_path + "Engineering\Tech_Dev_Software\PythonDev\TechDevPythonAnalyticsTools")
from tech_dev_python_analytics_tools import *
from check_pins import *
def test_grab_analyze_logfile():
    # take a log file and check to make sure it's averaging properly
cp = CheckPins("C:\Users\dhall\Box Sync\\Engineering\\Pin_Analysis\\test_folder", "10045_GoodDataSet.csv")
cp.grab_analyze_logfile("C:\Users\dhall\Box Sync\Engineering\Pin_Analysis\\test_folder\\results")
os.chdir(box_path + "Engineering\\Pin_Analysis\\test_folder")
my_read = read_whole_csv_file('average_from_test_file.csv', True)
read = my_read[0]
read = read.astype(float)
np.testing.assert_array_almost_equal(read, cp.average_of_trial)
def test_check_impedances():
# take a bad csv file and check to make sure it fails
cp = CheckPins("C:\Users\dhall\Box Sync\\Engineering\\Pin_Analysis\\test_folder", "10045_GoodDataSet.csv")
cp.grab_analyze_logfile("C:\Users\dhall\Box Sync\Engineering\Pin_Analysis\\test_folder\\results")
cp.check_impedances()
assert cp.high_risk_dictionary.keys() == [8, 41, 28]
def test_user_prompt():
cp = CheckPins("C:\Users\dhall\Box Sync\\Engineering\\Pin_Analysis\\test_folder", "10045_GoodDataSet.csv")
cp.grab_analyze_logfile("C:\Users\dhall\Box Sync\Engineering\Pin_Analysis\\test_folder\\results")
cp.check_impedances()
cp.user_prompt()
cp = CheckPins("C:\Users\dhall\Box Sync\\Engineering\\Pin_Analysis\\test_folder", "10045_GoodDataSet.csv")
cp.grab_analyze_logfile("C:\Users\dhall\Box Sync\Engineering\Pin_Analysis\\test_folder\\results_good")
cp.check_impedances()
cp.user_prompt()
def main():
test_grab_analyze_logfile()
test_check_impedances()
test_user_prompt()
main()
| UTF-8 | Python | false | false | 1,990 | py | 78 | test_check_pins.py | 7 | 0.720101 | 0.706533 | 0 | 45 | 43.222222 | 110 |
planetlabs/planet-client-python | 16,758,962,423,817 | 1693eb34fb5efdcdab3e24033786573e6acf79c8 | f42cd8413ac5bec6526cc1013253d00fd9becc62 | /tests/integration/test_auth_cli.py | 62fbd3563a6da5d6ef98959c9a4cc05b980f971b | [
"Apache-2.0"
] | permissive | https://github.com/planetlabs/planet-client-python | dda964d1d0a02e6413965fffa2f24f8a0b9d51f9 | 8b29a9300f8a144cc56a171f102b1a068fd6b692 | refs/heads/main | 2023-08-08T07:24:51.429307 | 2023-07-20T19:49:08 | 2023-07-20T19:49:08 | 37,222,273 | 269 | 89 | Apache-2.0 | false | 2023-08-01T00:52:42 | 2015-06-10T20:58:37 | 2023-07-25T15:12:18 | 2023-08-01T00:52:42 | 6,409 | 243 | 77 | 95 | Python | false | false | # Copyright 2022 Planet Labs PBC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from http import HTTPStatus
import json
import os
from click.testing import CliRunner
import httpx
import jwt
import pytest
import respx
from planet.cli import cli
TEST_URL = 'http://MockNotRealURL/api/path'
TEST_LOGIN_URL = f'{TEST_URL}/login'
# skip the global mock of _SecretFile.read
# for this module
@pytest.fixture(autouse=True, scope='module')
def test_secretfile_read():
return
@pytest.fixture
def redirect_secretfile(tmp_path):
"""patch the cli so it works with a temporary secretfile
this is to avoid collisions with the actual planet secretfile
"""
secretfile_path = tmp_path / 'secret.json'
with pytest.MonkeyPatch.context() as mp:
mp.setattr(cli.auth.planet.auth, 'SECRET_FILE_PATH', secretfile_path)
yield secretfile_path
@respx.mock
def test_cli_auth_init_success(redirect_secretfile):
"""Test the successful auth init path
Also tests the base-url command, since we will get an exception
if the base url is not changed to the mocked url
"""
payload = {'api_key': 'test_cli_auth_init_success_key'}
resp = {'token': jwt.encode(payload, 'key')}
mock_resp = httpx.Response(HTTPStatus.OK, json=resp)
respx.post(TEST_LOGIN_URL).return_value = mock_resp
result = CliRunner().invoke(cli.main,
args=['auth', '--base-url', TEST_URL, 'init'],
input='email\npw\n')
# we would get a 'url not mocked' exception if the base url wasn't
# changed to the mocked url
assert not result.exception
assert 'Initialized' in result.output
@respx.mock
def test_cli_auth_init_bad_pw(redirect_secretfile):
resp = {
"errors": None,
"message": "Invalid email or password",
"status": 401,
"success": False
}
mock_resp = httpx.Response(401, json=resp)
respx.post(TEST_LOGIN_URL).return_value = mock_resp
result = CliRunner().invoke(cli.main,
args=['auth', '--base-url', TEST_URL, 'init'],
input='email\npw\n')
assert result.exception
assert 'Error: Incorrect email or password.\n' in result.output
def test_cli_auth_value_success(redirect_secretfile):
key = 'test_cli_auth_value_success_key'
content = {'key': key}
with open(redirect_secretfile, 'w') as f:
json.dump(content, f)
result = CliRunner().invoke(cli.main, ['auth', 'value'])
assert not result.exception
assert result.output == f'{key}\n'
def test_cli_auth_value_failure(redirect_secretfile):
result = CliRunner().invoke(cli.main, ['auth', 'value'])
assert result.exception
assert 'Error: Auth information does not exist or is corrupted.' \
in result.output
def test_cli_auth_store_cancel(redirect_secretfile):
result = CliRunner().invoke(cli.main, ['auth', 'store', 'setval'],
input='')
assert not result.exception
assert not os.path.isfile(redirect_secretfile)
def test_cli_auth_store_confirm(redirect_secretfile):
result = CliRunner().invoke(cli.main, ['auth', 'store', 'setval'],
input='y')
assert not result.exception
with open(redirect_secretfile, 'r') as f:
assert json.load(f) == {'key': 'setval'}
| UTF-8 | Python | false | false | 3,888 | py | 121 | test_auth_cli.py | 61 | 0.658693 | 0.655093 | 0 | 123 | 30.609756 | 79 |
NobodyWHU/Leetcode | 13,219,909,365,820 | e754f37a951fc0a21c57bc60e9c0f13c3ee241b8 | 4e0ff785b993b6bae70745434e61f27ca82e88f0 | /36-Valid-Sudoku/solution.py | 4f1d8a7fc8e9f0082a0939eaf7573dafafaa0d93 | [] | no_license | https://github.com/NobodyWHU/Leetcode | 2ee557dd77c65c5fa8ca938efb6de3793b4de261 | d284fa3daab02531e5300867463b293d44737e32 | refs/heads/master | 2021-01-23T14:05:28.161062 | 2016-09-23T11:51:51 | 2016-09-23T11:51:51 | 58,898,114 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
        import collections
        m = len(board)
n = len(board[0])
if m != 9 or n != 9:
return False
def checkValid(valueList):
temp = collections.defaultdict(int)
for v in valueList:
temp[v] += 1
for k, v in temp.items():
if k != "." and v > 1:
return False
return True
# check row
for row in range(m):
if not checkValid(board[row]):
return False
# check column
for column in range(n):
columnList = []
for row in range(m):
columnList.append(board[row][column])
if not checkValid(columnList):
return False
# check square
for i in range(3):
for j in range(3):
squareList = []
for p in range(3):
for q in range(3):
squareList.append(board[i*3+q][j*3+p])
if not checkValid(squareList):
return False
return True | UTF-8 | Python | false | false | 1,311 | py | 161 | solution.py | 119 | 0.41495 | 0.40656 | 0 | 47 | 26.914894 | 62 |
fatancy2580/MDL-CPI | 10,376,641,007,197 | cee96c3609472adb282c86542b940beb46f7c369 | de894d7f60b0394fbc766d096c5cbbf9f2fef09b | /MDL-CPI/MDL-CPI.py | deb0dd8e33f503251651e15fee99cf6a53352b98 | [
"MIT"
] | permissive | https://github.com/fatancy2580/MDL-CPI | c6fa4a02b9d31d08b7eb40e694048d7a933f74b1 | 9e264b1aa076c322a4c575b857a62c8155ca8d00 | refs/heads/main | 2023-05-03T20:50:13.400443 | 2021-05-21T14:02:53 | 2021-05-21T14:02:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pickle
import timeit
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import roc_auc_score, precision_score, recall_score, precision_recall_curve, auc, roc_curve
import csv
import os
import sys
"""BERT"""
def get_attn_pad_mask(seq):
batch_size, seq_len = seq.size()
pad_attn_mask = seq.data.eq(0).unsqueeze(1) # [batch_size, 1, seq_len]
pad_attn_mask_expand = pad_attn_mask.expand(batch_size, seq_len, seq_len) # [batch_size, seq_len, seq_len]
return pad_attn_mask_expand
class Embedding(nn.Module):
def __init__(self):
super(Embedding, self).__init__()
self.tok_embed = nn.Embedding(vocab_size, d_model) # token embedding (look-up table)
self.pos_embed = nn.Embedding(max_len, d_model) # position embedding
self.norm = nn.LayerNorm(d_model)
def forward(self, x):
seq_len = x.size(1) # x: [batch_size, seq_len]
pos = torch.arange(seq_len, device=device, dtype=torch.long) # [seq_len]
pos = pos.unsqueeze(0).expand_as(x) # [seq_len] -> [batch_size, seq_len]
embedding = self.pos_embed(pos)
embedding = embedding + self.tok_embed(x)
embedding = self.norm(embedding)
return embedding
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, Q, K, V, attn_mask):
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k) # scores : [batch_size, n_head, seq_len, seq_len]
scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is one.
attn = nn.Softmax(dim=-1)(scores) # [batch_size, n_head, seq_len, seq_len]
context = torch.matmul(attn, V) # [batch_size, n_head, seq_len, d_v]
return context, attn
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__()
self.W_Q = nn.Linear(d_model, d_k * n_head)
self.W_K = nn.Linear(d_model, d_k * n_head)
self.W_V = nn.Linear(d_model, d_v * n_head)
self.linear = nn.Linear(n_head * d_v, d_model)
self.norm = nn.LayerNorm(d_model)
def forward(self, Q, K, V, attn_mask):
residual, batch_size = Q, Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1, 2) # q_s: [batch_size, n_head, seq_len, d_k]
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1, 2) # k_s: [batch_size, n_head, seq_len, d_k]
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1, 2) # v_s: [batch_size, n_head, seq_len, d_v]
attn_mask = attn_mask.unsqueeze(1).repeat(1, n_head, 1, 1)
context, attention_map = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)
context = context.transpose(1, 2).contiguous().view(batch_size, -1,
n_head * d_v) # context: [batch_size, seq_len, n_head * d_v]
output = self.linear(context)
output = self.norm(output + residual)
return output, attention_map
class PoswiseFeedForwardNet(nn.Module):
def __init__(self):
super(PoswiseFeedForwardNet, self).__init__()
self.fc1 = nn.Linear(d_model, d_ff)
self.fc2 = nn.Linear(d_ff, d_model)
self.relu = nn.ReLU()
def forward(self, x):
# (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_ff) -> (batch_size, seq_len, d_model)
return self.fc2(self.relu(self.fc1(x)))
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer, self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.pos_ffn = PoswiseFeedForwardNet()
self.attention_map = None
def forward(self, enc_inputs, enc_self_attn_mask):
enc_outputs, attention_map = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs,
enc_self_attn_mask) # enc_inputs to same Q,K,V
self.attention_map = attention_map
enc_outputs = self.pos_ffn(enc_outputs) # enc_outputs: [batch_size, seq_len, d_model]
return enc_outputs
class BERT(nn.Module):
def __init__(self):
super(BERT, self).__init__()
global max_len, n_layers, n_head, d_model, d_ff, d_k, d_v, vocab_size, device
max_len = 2048
n_layers = 3
n_head = 8
d_model = dim
d_ff = 64
d_k = 32
d_v = 32
vocab_size = n_word
self.embedding = Embedding()
self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
self.fc_task = nn.Sequential(
nn.Linear(d_model, d_model // 2),
nn.Dropout(0.1),
nn.ReLU(),
nn.Linear(d_model // 2, 2),
)
self.classifier = nn.Linear(2, 2)
def forward(self, input_ids):
# input_ids[batch_size, seq_len] like[8,1975]
output = self.embedding(input_ids) # [batch_size, seq_len, d_model]
enc_self_attn_mask = get_attn_pad_mask(input_ids) # [batch_size, maxlen, maxlen]
for layer in self.layers:
output = layer(output, enc_self_attn_mask)
return output
"""GNN"""
class GNN(nn.Module):
def __init__(self):
super(GNN, self).__init__()
self.W_gnn = nn.ModuleList([nn.Linear(dim, dim) for _ in range(layer_gnn)])
def forward(self, xs, A, layer):
for i in range(layer):
hs = torch.relu(self.W_gnn[i](xs))
xs = xs + torch.matmul(A, hs)
return torch.unsqueeze(torch.mean(xs, 0), 0)
"""AE2"""
class AENet(nn.Module):
def __init__(self, inputDim, hiddenDim, prelr, totlr):
super(AENet, self).__init__()
self.enfc = nn.Linear(inputDim, hiddenDim)
self.defc = nn.Linear(hiddenDim, inputDim)
def encoder(self, x):
return torch.sigmoid(self.enfc(x))
def decoder(self, zHalf):
return torch.sigmoid(self.defc(zHalf))
def totolTrainOnce(self, trainDataList, g, lamda):
g = torch.autograd.Variable(g, requires_grad=False)
trainLoader = DataLoader(
dataset=TensorDataset(trainDataList, g),
batch_size=1,
shuffle=True
)
for x, g in trainLoader:
x = x.float()
zHalf = self.encoder(x)
z = self.decoder(zHalf)
return z
class DGNet(nn.Module):
def __init__(self, targetDim, hiddenDim, lr=0.001):
super(DGNet, self).__init__()
self.dgfc = nn.Linear(targetDim, hiddenDim)
def degradation(self, h):
return torch.sigmoid(self.dgfc(h))
def totalTrainDgOnce(self, hList, zHalfList, lamda):
hList = torch.autograd.Variable(hList, requires_grad=False)
zHalfList = torch.autograd.Variable(zHalfList, requires_grad=False)
trainLoader = DataLoader(
dataset=TensorDataset(hList, zHalfList),
batch_size=1,
shuffle=True
)
for h, zHalf in trainLoader:
g = self.degradation(h)
return g
class Autoencoder(nn.Module):
def __init__(self, dimList, targetDim, hiddenDim=100, preTrainLr=0.001,
aeTotleTrainLr=0.001, dgTotleTrainLr=0.001, lamda=1.0, HTrainLr=0.1):
super(Autoencoder, self).__init__()
self.viewNum = 0
self.nSample = 1
self.lamda = lamda
self.HTrainLr = HTrainLr
self.aeNetList = [AENet(d, hiddenDim, preTrainLr, aeTotleTrainLr).cuda() for d in dimList]
self.dgNetList = [DGNet(targetDim, hiddenDim, dgTotleTrainLr).cuda() for d in dimList]
self.H = nn.Parameter(torch.FloatTensor(np.random.uniform(0, 1, [self.nSample, targetDim])))
self.input = []
self.output = []
def forward(self, trainDataList, nSample=1):
# totleTrain
self.nSample = nSample
self.viewNum = len(trainDataList) # 1
# 1.Update aenets
g = [dgnet.degradation(self.H) for dgnet in self.dgNetList]
for v in range(self.viewNum):
self.aeNetList[v].totolTrainOnce(trainDataList[v], g[v], self.lamda)
# 2.Update dgnets&AE2
for v in range(self.viewNum):
zHalfList = self.aeNetList[v].encoder(trainDataList[v].float())
# 2.1 Update denets
self.dgNetList[v].totalTrainDgOnce(self.H, zHalfList, self.lamda)
# 2.2 Update AE2
tmpZHalfList = torch.autograd.Variable(zHalfList, requires_grad=False)
trainLoader = DataLoader(
dataset=TensorDataset(self.H, tmpZHalfList),
batch_size=100,
shuffle=True
)
for h, zHalf in trainLoader:
self.input = zHalf
self.output = self.dgNetList[v].degradation(h)
return self.H, self.input, self.output
def getH(self):
return self.H
"""MDL-CPI model"""
class ABG(nn.Module):
def __init__(self):
super(ABG, self).__init__()
self.Bert = BERT()
self.GNN = GNN()
self.Autoencoder = Autoencoder(dimList, dimOut)
self.embed_fingerprint = nn.Embedding(n_fingerprint, dim)
self.embed_word = nn.Embedding(n_word, dim)
self.W_cnn = nn.ModuleList([nn.Conv2d(
in_channels=1, out_channels=1, kernel_size=2 * window + 1,
stride=1, padding=window) for _ in range(layer_cnn)])
self.W_attention = nn.Linear(dim, dim)
self.W_outChange = nn.ModuleList([nn.Linear(dimChange, dimChange)
for _ in range(layer_output)])
self.W_interactionChange = nn.Linear(dimChange, 2)
def cnn(self, x, xs, layer):
xs = torch.unsqueeze(torch.unsqueeze(xs, 0), 0)
for i in range(layer):
xs = torch.relu(self.W_cnn[i](xs))
xs = torch.squeeze(torch.squeeze(xs, 0), 0)
h = torch.relu(self.W_attention(x))
hs = torch.relu(self.W_attention(xs))
weights = torch.tanh(F.linear(h, hs))
ys = torch.t(weights) * hs
return torch.unsqueeze(torch.mean(ys, 0), 0)
def forward(self, data, ifTrain=True):
correct_interaction = data[-1]
fingerprints, adjacency, words = data[:-1]
"""Compound vector with GNN."""
fingerprint_vectors = self.embed_fingerprint(fingerprints)
compound_vector = self.GNN(fingerprint_vectors, adjacency, layer_gnn)
"""update AE2"""
fusion_vector, aeInput, aeOutput = self.Autoencoder([compound_vector])
HTrainOptimizor = optim.Adam([self.Autoencoder.H], lr=lr_auto)
loss = F.mse_loss(aeOutput, aeInput)
HTrainOptimizor.zero_grad()
loss = loss.requires_grad_()
loss.backward()
HTrainOptimizor.step()
"""Protein vector with BERT-CNN."""
protein_vectors = self.Bert(words.unsqueeze(0))
protein_vector = self.cnn(compound_vector, protein_vectors, layer_cnn)
"""update AE2"""
fusion_vector, aeInput, aeOutput = self.Autoencoder([protein_vector])
HTrainOptimizor = optim.Adam([self.Autoencoder.H], lr=lr_auto)
loss = F.mse_loss(aeOutput, aeInput)
HTrainOptimizor.zero_grad()
loss = loss.requires_grad_()
loss.backward()
HTrainOptimizor.step()
"""Fusion vector with AE2."""
# updated data
with torch.no_grad():
compound_vector = self.GNN(fingerprint_vectors, adjacency, layer_gnn)
protein_vector = self.Bert(words.unsqueeze(0))
fusion_vector = self.Autoencoder.getH()
"""Concatenate """
cat_vector = torch.cat((compound_vector,
protein_vector, fusion_vector), 1)
cat_vector = cat_vector.to(torch.float32)
'''Predict Module'''
for j in range(layer_output):
cat_vector = torch.relu(self.W_outChange[j](cat_vector))
interaction = self.W_interactionChange(cat_vector)
if ifTrain:
loss = F.cross_entropy(interaction, correct_interaction)
return loss
else:
correct_labels = correct_interaction.to('cpu').data.numpy()
ys = F.softmax(interaction, 1).to('cpu').data.numpy()
predicted_labels = list(map(lambda x: np.argmax(x), ys))
predicted_scores = list(map(lambda x: x[1], ys))
return correct_labels, predicted_labels, predicted_scores
class Trainer(object):
def __init__(self, model):
self.model = model
self.optimizer = optim.Adam(self.model.parameters(),
lr=lr, weight_decay=weight_decay)
def train(self, dataset):
np.random.shuffle(dataset)
N = len(dataset)
loss_total = 0
for data in dataset:
loss = self.model(data)
self.optimizer.zero_grad()
loss = loss.requires_grad_()
loss.backward()
self.optimizer.step()
loss_total += loss.to('cpu').data.numpy()
return loss_total
class Tester(object):
def __init__(self, model):
self.model = model
def test(self, dataset):
N = len(dataset)
T, Y, S = [], [], []
with torch.no_grad():
correct = 0
total = 0
for data in dataset:
(correct_labels, predicted_labels,
predicted_scores) = self.model(data, False)
correct += (predicted_labels == correct_labels).sum()
total += len(correct_labels)
T.append(correct_labels)
Y.append(predicted_labels)
S.append(predicted_scores)
res = [T, Y, S]
acc = correct / total
AUC = roc_auc_score(T, S)
precision = precision_score(T, Y)
recall = recall_score(T, Y)
tpr, fpr, _ = precision_recall_curve(T, S)
PRC = auc(fpr, tpr)
return AUC, PRC, precision, recall, acc, res
def save_AUCs(self, AUCs, filename):
with open(filename, 'a') as f:
f.write('\t'.join(map(str, AUCs)) + '\n')
def save_model(self, model, filename):
torch.save(model.state_dict(), filename)
def load_tensor(file_name, dtype):
return [dtype(d).to(device) for d in np.load(file_name + '.npy', allow_pickle=True)]
def load_datafile(file_name, ceng):
csv_reader = csv.reader(open(file_name, encoding='utf-8'))
newdata = []
for row in csv_reader:
newhang = []
for d in row:
newhang.append(float(d))
x = []
for i in range(ceng):
x.append(newhang)
newdata.append(x)
tmp = np.array(newdata)
return torch.from_numpy(tmp)
def load_npy_datalist(dir_input):
compounds = load_tensor(dir_input + 'compounds', torch.LongTensor)
adjacencies = load_tensor(dir_input + 'adjacencies', torch.FloatTensor)
proteins = load_tensor(dir_input + 'proteins', torch.LongTensor)
interactions = load_tensor(dir_input + 'interactions', torch.LongTensor)
fingerprint_dict = load_pickle(dir_input + 'fingerprint_dict.pickle')
word_dict = load_pickle(dir_input + 'word_dict.pickle')
n_fingerprint = len(fingerprint_dict)
n_word = len(word_dict)
return compounds, adjacencies, proteins, interactions, \
fingerprint_dict, word_dict, n_fingerprint, n_word
def load_pickle(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
def shuffle_dataset(dataset, seed):
np.random.seed(seed)
np.random.shuffle(dataset)
return dataset
def split_dataset(dataset, ratio):
n = int(ratio * len(dataset))
dataset_1, dataset_2 = dataset[:n], dataset[n:]
return dataset_1, dataset_2
if __name__ == "__main__":
"""Hyperparameters."""
(DATASET, radius, ngram, dim, layer_gnn, window, layer_cnn, layer_output,
lr, lr_decay, decay_interval, weight_decay, iteration,
setting) = sys.argv[1:]
(dim, layer_gnn, window, layer_cnn, layer_output, decay_interval,
iteration) = map(int, [dim, layer_gnn, window, layer_cnn, layer_output,
decay_interval, iteration])
lr, lr_decay, weight_decay = map(float, [lr, lr_decay, weight_decay])
dimList = [dim]
global dimOut, lr_auto
dimOut = 8
lr_auto = 0.1
dimChange = dim + dim + dimOut
""" About """
about = 'MDL-CPI'
"""CPU or GPU."""
if torch.cuda.is_available():
torch.cuda.set_device(0)
print('|' + '\t' * 6 + 'torch.cuda.current_device:' + str(torch.cuda.current_device()))
device = torch.device('cuda')
print('|' + '\t' * 6 + 'The code uses GPU...')
else:
device = torch.device('cpu')
print('|' + '\t' * 6 + 'The code uses CPU!!!')
print('|' + '-' * 2 + 'MDL-CPI Hyperparameters setting OVER')
"""Load preprocessed data."""
global n_word
dir_input = 'xxxxxxxxxx/dataset/' + DATASET + '/input/radius2_ngram3/'
compounds, adjacencies, proteins, interactions, \
fingerprint_dict, word_dict, n_fingerprint, n_word = load_npy_datalist(dir_input)
print('|' + '-' * 2 + 'MDL-CPI data load OVER')
"""Create a dataset and split it into train/dev/test..."""
dataset = list(zip(compounds, adjacencies, proteins, interactions))
dataset = shuffle_dataset(dataset, 1234)
dataset_train, dataset_ = split_dataset(dataset, 0.8)
dataset_dev, dataset_test = split_dataset(dataset_, 0.5)
"""Set a model."""
torch.manual_seed(1234)
model = ABG().to(device)
trainer = Trainer(model)
tester = Tester(model)
"""Output files."""
file_AUCs = './output/result/'
file_model = './output/model/'
if not os.path.exists(file_AUCs):
os.makedirs(file_AUCs)
if not os.path.exists(file_model):
os.makedirs(file_model)
file_AUCs = './output/result/' + about + 'AUCs--' + setting + '.txt'
file_model = './output/model/' + about + 'model_' + about + setting
AUCs = 'Epoch\tTime(sec)\t\tLoss_train\t\t\tAUC_dev\t\t\tACC_dev\t\t\t' \
'AUC\t\t\tPRC\t\t\tPrecision\t\t\tRecall\t\t\tACC'
with open(file_AUCs, 'w') as f:
f.write(AUCs + '\n')
print('|' + '-' * 2 + 'MDL-CPI model setting OVER')
"""Start training."""
print('|' + '-' * 2 + 'MDL-CPI train START')
print('|' + '\t' * 6 + AUCs)
start = timeit.default_timer()
# TAO
results = [[], [], []]
for epoch in range(1, iteration):
if epoch % decay_interval == 0:
            trainer.optimizer.param_groups[0]['lr'] *= lr_decay  # dynamically adjust the learning rate during training
loss_train = trainer.train(dataset_train)
AUC_dev, PRC_dev, precision_dev, recall_dev, acc_dev, res = tester.test(dataset_dev)
AUC_test, PRC_test, precision_test, recall_test, acc_test, res = tester.test(dataset_test)
end = timeit.default_timer()
time = end - start
AUCs = [epoch, time, loss_train, AUC_dev, acc_dev,
AUC_test, PRC_test, precision_test, recall_test, acc_test]
tester.save_AUCs(AUCs, file_AUCs)
tester.save_model(model, file_model)
results[0].extend(res[0])
results[1].extend(res[1])
results[2].extend(res[2])
print('|' + '\t' * 6 + '\t'.join(map(str, AUCs)))
print('|' + '-' * 2 + 'MDL-CPI train END')
print("results\n")
print(results)
print('|' + '-' * 2 + about + "ALL FINISH !!! ")
| UTF-8 | Python | false | false | 20,215 | py | 4 | MDL-CPI.py | 3 | 0.568273 | 0.559754 | 0 | 558 | 34.184588 | 121 |
khucnam/Efflux_TransVAE | 16,595,753,660,191 | 8e56915b1a9707b63871f066cc1113d636886374 | acf6e80380499e82ecc657244c03d3c187194969 | /tests/models/test_transformer_vae.py | b7ad571e0d667de31dc601dbccb341afcb7db9a1 | [
"MIT"
] | permissive | https://github.com/khucnam/Efflux_TransVAE | 4ea661b385d62b90fc00a8676b1d78109bd37799 | 7da1cc614f016d5520648f4853e34e2362181aa7 | refs/heads/master | 2023-04-07T07:56:20.289235 | 2021-04-15T23:49:40 | 2021-04-15T23:49:40 | 355,948,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import torch
from torch import optim
from tvae.models import TransformerVAE
class TestTransformerVAEMethods(unittest.TestCase):
def setUp(self):
self.max_seq_len = 100
self.max_vocab_size = 100
self.latent_size = 100
self.model = TransformerVAE(self.max_seq_len, self.max_vocab_size, latent_size=self.latent_size,
num_layers=1, dim_m=32, dim_i=64)
self.optimizer = optim.Adam(self.model.learnable_parameters())
def test_init(self):
self.assertIsInstance(self.model, TransformerVAE)
def test_forward(self):
input_sequence = torch.randint(0, 100, (8, 42))
output_seq_distr, mu, logvar = self.model(input_sequence)
self.assertTupleEqual(output_seq_distr.shape, (8, 41, self.max_vocab_size))
self.assertTupleEqual(mu.shape, (8, self.latent_size))
self.assertTupleEqual(logvar.shape, (8, self.latent_size))
def test_inference_sequence(self):
input_sequence = torch.randint(0, 100, (8, 42))
generated_seq, generated_seq_distr, z = self.model.inference(input_sequence, limit=50)
self.assertTupleEqual(generated_seq.shape, (8, 50))
self.assertTupleEqual(generated_seq_distr.shape, (8, 50, self.max_vocab_size))
self.assertTupleEqual(z.shape, (8, self.latent_size))
def test_inference_z(self):
z = torch.randn((8, self.latent_size))
generated_seq, generated_seq_distr, z = self.model.inference(z=z, limit=50)
self.assertTupleEqual(generated_seq.shape, (8, 50))
self.assertTupleEqual(generated_seq_distr.shape, (8, 50, self.max_vocab_size))
self.assertTupleEqual(z.shape, (8, self.latent_size))
def test_inference_invalid_input(self):
with self.assertRaises(AssertionError):
self.model.inference()
def test_trainer(self):
data = [{"src": torch.randint(0, 100, (8, 42))}]
if torch.cuda.is_available():
device = torch.device("cuda")
self.model.to(device)
else:
device = torch.device("cpu")
trainer = self.model.create_trainer(self.optimizer, device)
self.assertIsNotNone(trainer)
state = trainer.run(data)
kld, ce, loss = state.kld, state.ce, state.loss
self.assertIsInstance(kld, float)
self.assertIsInstance(ce, float)
self.assertIsInstance(loss, float)
def test_evaluator(self):
data = [{"src": torch.randint(0, 100, (8, 42))}]
if torch.cuda.is_available():
device = torch.device("cuda")
self.model.to(device)
else:
device = torch.device("cpu")
data = [{"src": torch.randint(0, 100, (8, 42))}]
evaluator = self.model.create_evaluator(device)
state = evaluator.run(data)
generated, original = state.output
self.assertTupleEqual(generated.shape, (8, 41, self.max_vocab_size))
self.assertTupleEqual(original.shape, (8, 41))
| UTF-8 | Python | false | false | 3,025 | py | 23 | test_transformer_vae.py | 10 | 0.630083 | 0.603967 | 0 | 79 | 37.291139 | 104 |
moleculea/autoscale_sample | 13,692,355,780,835 | 4b3f9379b0cbe663486f687e33875aabc94c48fc | d89240711278be6228c4241e2b363aabd3f9b556 | /autoscale_sample.py | ad3a2ca6dd630034fa1f1d31f779a5b84b41aeb4 | [] | no_license | https://github.com/moleculea/autoscale_sample | d94cdef8b8764cb89e9b38ac2e7f5170e70f5ec8 | b3a242cad7ee516ff0ce64dd3fbdf91facfa04d6 | refs/heads/master | 2020-06-02T08:31:06.960974 | 2013-10-26T03:20:38 | 2013-10-26T03:20:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto.ec2
from boto.ec2.autoscale import (AutoScalingGroup,
LaunchConfiguration, ScalingPolicy)
from boto.ec2.cloudwatch import MetricAlarm
# Create connections to Auto Scaling and CloudWatch
as_conn = boto.ec2.autoscale.connect_to_region("us-east-1")
cw_conn = boto.ec2.cloudwatch.connect_to_region("us-east-1")
# Name for auto scaling group and launch configuration
as_name = "VM1"
# Create launch configuration
lc = LaunchConfiguration(name=as_name,
image_id="ami-76f0061f", # AMI ID of your instance
key_name="your_key_name",
security_groups="your_group_name",
instance_type="t1.micro",
instance_monitoring=True)
as_conn.create_launch_configuration(lc)
# Create Auto Scaling group
ag = AutoScalingGroup(group_name=as_name,
availability_zones=["us-east-1b"],
launch_config=lc, min_size=0,
max_size=2,
connection=as_conn)
as_conn.create_auto_scaling_group(ag)
# Create scaling policies
scale_up_policy = ScalingPolicy(
name='scale_up', adjustment_type='ChangeInCapacity',
as_name=as_name, scaling_adjustment=1, cooldown=180)
scale_down_policy = ScalingPolicy(
name='scale_down', adjustment_type='ChangeInCapacity',
as_name=as_name, scaling_adjustment=-1, cooldown=180)
as_conn.create_scaling_policy(scale_up_policy)
as_conn.create_scaling_policy(scale_down_policy)
scale_up_policy = as_conn.get_all_policies(
as_group=as_name, policy_names=['scale_up'])[0]
scale_down_policy = as_conn.get_all_policies(
as_group=as_name, policy_names=['scale_down'])[0]
# Set dimensions for CloudWatch alarms
# Monitor on a specific instance
alarm_dimensions = {"InstanceId": "your_instance_id"}
# Monitor instances within the Auto Scaling group cluster
alarm_dimensions_as = {"AutoScalingGroupName": as_name}
# Create metric alarms
scale_up_alarm = MetricAlarm(
name='scale_up_on_cpu_' + as_name, namespace='AWS/EC2',
metric='CPUUtilization', statistic='Average',
comparison='>', threshold="80",
period='60', evaluation_periods=2,
alarm_actions=[scale_up_policy.policy_arn],
dimensions=alarm_dimensions)
scale_down_alarm = MetricAlarm(
name='scale_down_on_cpu_' + as_name, namespace='AWS/EC2',
metric='CPUUtilization', statistic='Average',
comparison='<', threshold="20",
period='60', evaluation_periods=2,
alarm_actions=[scale_down_policy.policy_arn],
dimensions=alarm_dimensions)
# Create alarm in CloudWatch
cw_conn.create_alarm(scale_up_alarm)
cw_conn.create_alarm(scale_down_alarm)
| UTF-8 | Python | false | false | 2,714 | py | 1 | autoscale_sample.py | 1 | 0.67465 | 0.659912 | 0 | 77 | 34.246753 | 76 |
sirdesmond09/mypython- | 12,799,002,581,269 | 784dd3f75fb6ebea2b29acd8db337b521b028998 | 4e7c27ebca4627d8961ba842152de1130c7952b6 | /list.py | 347291cf048f171f2f8ab200d84d3cd3e07fd020 | [] | no_license | https://github.com/sirdesmond09/mypython- | db0f6de93142ce7fc052047fe8de8d7915749f96 | adf77c84a13c1ecb5e35256a0ac09bfa3effe585 | refs/heads/master | 2020-12-26T06:04:07.013448 | 2020-01-31T10:49:50 | 2020-01-31T10:49:50 | 237,410,827 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | list=['physics', 'chemistry', 1997, 2000];
print("value at index 2:")
print (list[2])
list[2] = 2001 # changing the value at index 2
print("new value at index 2:")
print (list[2])
print(len(list))
| UTF-8 | Python | false | false | 194 | py | 19 | list.py | 18 | 0.675258 | 0.582474 | 0 | 7 | 26.714286 | 43 |
mndimitrov92/ds-and-design-patterns | 1,176,821,039,933 | 95b13e9e6da58560d1249a04fb63b7117d99de7f | 7b6313100e6da726f47119d4e83958cc259f91b4 | /Structural_patterns/flyweight_dp.py | b8d059c373acc7d7b85d4748b8f2ea9516b0ae49 | [] | no_license | https://github.com/mndimitrov92/ds-and-design-patterns | bb1a1ec626a39c770a6eb660c4af312f9aefc4b5 | 7a584c8fc5318393e724db9150edf587262fdd0e | refs/heads/master | 2023-08-25T18:34:09.194724 | 2021-10-27T07:36:11 | 2021-10-27T07:36:11 | 266,281,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Flyweight design pattern
INTENT:
Flyweight is a structural design pattern that lets you fit more objects into the available amount of RAM by sharing
common parts of state between multiple objects instead of keeping all of the data in each object.
Generally it is a space optimization technique that lets us use less memory by storing externally the data
associated with similar objects.
APPLICABILITY:
Use the Flyweight pattern only when your program must support a huge number of objects which barely fit into available RAM.
The benefit of applying the pattern depends heavily on how and where it’s used. It’s most useful when:
*an application needs to spawn a huge number of similar objects
*this drains all available RAM on a target device
*the objects contain duplicate states which can be extracted and shared between multiple objects
PROS AND CONS:
PROS:
*You can save lots of RAM, assuming your program has tons of similar objects.
CONS:
*You might be trading RAM over CPU cycles when some of the context data needs to be recalculated each time somebody calls a flyweight method.
*The code becomes much more complicated. New team members will always be wondering why the state of an entity was separated in such a way.
USAGE:
The Flyweight pattern has a single purpose: minimizing memory intake. If your program doesn’t struggle
with a shortage of RAM, then you might just ignore this pattern for a while.
IDENTIFICATION:
Flyweight can be recognized by a creation method that returns cached objects instead of creating new.
"""
import random
import string
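# Minimal illustrative sketch of the IDENTIFICATION note above: a creation method
# that returns cached objects instead of creating new ones. It is generic and not
# used by the examples below, which share state via index lists / formatting ranges.
class GlyphFactory:
    """Hands out one shared glyph object per character instead of a new copy each time."""
    _cache = {}
    @classmethod
    def get_glyph(cls, char):
        if char not in cls._cache:
            cls._cache[char] = {"char": char}  # intrinsic (shared) state
        return cls._cache[char]
# e.g. GlyphFactory.get_glyph("a") is GlyphFactory.get_glyph("a")  -> True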
def random_string():
"""Generate a random string with 8 characters that will be used for the first and last name of the user"""
chars = string.ascii_lowercase
return "".join([random.choice(chars) for x in range(8)])
# First Flyweight example for storing user names
class User:
"""Ordinary class """
def __init__(self, name):
self.name = name
class UserWithFlyweight:
# we first need a static variable to store the inputs
strings = []
def __init__(self, name):
self.names = [self.get_or_add(x) for x in name.split(" ")]
def get_or_add(self, s):
if s in self.strings:
# get the index of the string if it is present
return self.strings.index(s)
else:
# otherwise append it
self.strings.append(s)
return len(self.strings) - 1
def __str__(self):
return " ".join([self.strings[x] for x in self.names])
def test_user_generation():
# Usage without the flyweight pattern
users = []
first_names = [random_string() for _ in range(100)]
last_names = [random_string() for _ in range(100)]
# Generate 10 000 users with the ordinary class
for first in first_names:
for last in last_names:
users.append(User(f"{first} {last}"))
# Test Flyweight
u1 = UserWithFlyweight("Jim Jones")
u2 = UserWithFlyweight("Tom Jones")
print(u1.names)
print(u2.names)
print(UserWithFlyweight.strings)
flyweight_users = []
for first in first_names:
for last in last_names:
flyweight_users.append(UserWithFlyweight(f"{first} {last}"))
# Using Flyweight for text formatting
class RegularTextFormatter:
def __init__(self, plain_text):
self.plain_text = plain_text
# Create an array with bool values for capitalization corresponding to each letter of the text
self.caps = [False] * len(self.plain_text)
def capitalize(self, start, end):
"""Sets the capitalization marker of the letters in a given range"""
for x in range(start, end):
self.caps[x] = True
def __str__(self):
result = []
for x in range(len(self.plain_text)):
# Capture the current character
c = self.plain_text[x]
# append the uppercased version if the marker in the caps array is True
result.append(c.upper() if self.caps[x] else c)
return "".join(result)
class FlyweightTextFormatter:
def __init__(self, plain_text):
self.plain_text = plain_text
# A variable to store the formatting
self.formatting = []
# Create the flyweight inner class
class TextRange:
def __init__(self, start, end, capitalize=False, bold=False, italic=False):
self.start = start
self.end = end
self.capitalize = capitalize
self.bold = bold
self.italic = italic
def covers(self, position):
"""Check if the given position is within the range"""
return self.start <= position <= self.end
def get_range(self, start, end):
char_range = self.TextRange(start, end)
# Add the character range in the formatting variable
self.formatting.append(char_range)
return char_range
def __str__(self):
result = []
for x in range(len(self.plain_text)):
c = self.plain_text[x]
for r in self.formatting:
                # If the letter is in the given range and has the capitalization flag, change it to uppercase
if r.covers(x) and r.capitalize:
c = c.upper()
result.append(c)
return "".join(result)
def test_text_formatter():
some_text = "This is a nice place."
rtf = RegularTextFormatter(some_text)
rtf.capitalize(5, 7)
print(rtf)
# Flyweight text formatter
ftf = FlyweightTextFormatter(some_text)
# set the capitalize flag for this range to True
ftf.get_range(10, 15).capitalize = True
print(ftf)
# Third flyweight exercise
# Given a string of words we need to create an interface to capitalize particular words from the string
class Sentence:
def __init__(self, text):
# Split the text into words
self.text = text.split(" ")
# Variable to hold the words
self.words = {}
class Word:
"""Flyweight to """
def __init__(self, capitalize=False):
self.capitalize = capitalize
def __getitem__(self, item):
word = self.Word()
# Add the marker to the dictionary and return it
self.words[item] = word
return self.words[item]
def __str__(self):
result = []
for i, w in enumerate(self.text):
            # If the index of the word is in the dictionary and the word has the capitalize marker
if i in self.words and self.words[i].capitalize:
# Capitalize the word
w = w.upper()
# Append the output to the result
result.append(w)
return " ".join(result)
def test_word_capitalizer():
text = "Hello world"
s = Sentence(text)
# Capitalize the word on index 1
s[1].capitalize = True
print(s)
if __name__ == '__main__':
print("Test flyweight with user names:")
# test_user_generation()
print("Flyweight example with text formatting:")
test_text_formatter()
print("Test flyweight example 3:")
test_word_capitalizer() | UTF-8 | Python | false | false | 7,201 | py | 44 | flyweight_dp.py | 43 | 0.63975 | 0.635997 | 0 | 211 | 33.104265 | 149 |
luozixu/ATM | 14,800,457,331,642 | 8ebd0259aca1097b1e9f6139eefa6d234e2e76a3 | 71831a12819c944a3145d9ad9edfcd0de47ba950 | /atm/bin/manae.py | 2362fe38e34951c1890cff3c5c0c29eaee9026a4 | [] | no_license | https://github.com/luozixu/ATM | 9c09c1fac7b93d56e1b3e2e89b1d1d2421ce4f40 | 4f105c859e991c3ac812cf01b5e59eb442e30641 | refs/heads/master | 2020-08-06T20:16:41.945303 | 2019-10-06T09:18:32 | 2019-10-06T09:18:32 | 213,139,463 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # locate the project root path
sys.path.append(BASE_DIR) # add it to the import path
from core import main
'''Entry point script for the management program'''
if __name__ == "__main__":
main.run_manage()
| UTF-8 | Python | false | false | 266 | py | 11 | manae.py | 11 | 0.625 | 0.625 | 0 | 10 | 22.2 | 77 |
ayushchauhan09/My-Codes | 1,786,706,397,621 | 791daadf0e1caf04b639709fe37330317b051fbb | ba790632a5f5577eb05f4548e9d749c958a98426 | /Codes/Python/Decrement-OR-Increment.py | 5194f50e382bd894e6106f0381d9955b467824f7 | [
"MIT"
] | permissive | https://github.com/ayushchauhan09/My-Codes | f30489221e66132f2eb3bad9dc0ee3e0bd81081c | ce2fcd5826c25039555998c149657426bc2f1735 | refs/heads/main | 2023-04-15T17:17:27.685017 | 2021-04-20T05:33:25 | 2021-04-20T05:33:25 | 334,468,830 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | N = int(input())
if N%4==0:
print(N+1)
else:
print(N-1) | UTF-8 | Python | false | false | 67 | py | 42 | Decrement-OR-Increment.py | 41 | 0.477612 | 0.41791 | 0 | 5 | 11.8 | 16 |
ChiragJRana/Competitivecodingsolutions | 18,760,417,152,407 | 0c1e0e7fdea4a6879a373399e228a81404b3bdab | 07ed3e249802ed7d43c25b5d2c911f66b7d27760 | /LEADGAME.py | d47c33ab76d6b72f47b90be67fac24d8cc08d80e | [] | no_license | https://github.com/ChiragJRana/Competitivecodingsolutions | c4f7b20f146670d6c0ca12c9866ab562bf2d66d6 | b9af33869669d3cd183f181f73a317efaedf8309 | refs/heads/master | 2022-12-05T02:13:43.335580 | 2020-08-22T08:19:27 | 2020-08-22T08:19:27 | 280,814,203 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import stdin, stdout
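# The Lead Game: keep running totals of both players' scores after every round;
# the winner is whoever ever holds the largest lead, printed together with that lead.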
def main():
N = int(input())
l1 = [0]
l2 = [0]
winner = max_val = 0
    rounds = [tuple(map(int, stdin.readline().split())) for _ in range(N)]
    for i in range(N):
        l1.append(l1[-1] + rounds[i][0])
        l2.append(l2[-1] + rounds[i][1])
if abs(l2[-1] - l1[-1]) > max_val:
winner = (2,1) [l1[-1] > l2[-1]]
max_val = abs(l2[-1] - l1[-1])
print(winner, max_val)
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 505 | py | 42 | LEADGAME.py | 42 | 0.489109 | 0.435644 | 0 | 18 | 27.111111 | 78 |
adichouhan14/python-assignment | 5,334,349,423,749 | ded2c42db8777ef5a50f8f7c545bcd9d20bdc7cd | 441c8de92e7deaf48dcf81a8ee7f6c71ca93dfa5 | /py assignments/module 3/validation_negative.py | a19c85fe61b91d9d9417d93bab430b663591d7c0 | [] | no_license | https://github.com/adichouhan14/python-assignment | 591f6a9cfb1b187181213a6eed7df22794a75529 | 36d096020530be8689a79f4c60e004608af5d61e | refs/heads/master | 2020-12-15T06:37:18.113221 | 2020-01-28T13:40:47 | 2020-01-28T13:40:47 | 235,022,599 | 0 | 1 | null | false | 2020-09-30T19:22:28 | 2020-01-20T05:03:34 | 2020-01-28T13:40:50 | 2020-01-28T13:40:48 | 20 | 0 | 1 | 1 | Python | false | false | wholeSaleValue=0.0
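# Input-validation exercise: keep asking for wholesale prices, silently skipping
# negative entries; when the user quits, print the accumulated total times 0.5 as the retail price.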
while(True):
wholeSale=float(input("Enter the Wholesale Price"))
if wholeSale<0:
continue
else:
wholeSaleValue+=wholeSale
ch=input("press Y to continue and N to exit")
if(ch=='n' or ch=='N'):
break
print("Retail Price = ",wholeSaleValue*0.5)
| UTF-8 | Python | false | false | 316 | py | 59 | validation_negative.py | 52 | 0.610759 | 0.594937 | 0 | 11 | 26.727273 | 55 |
ISEAGE-ISU/cdc-signup | 1,108,101,594,160 | cf7f0b67cc9bba1367d4e5059c7d7459fafc2a85 | f5beb08c228140c590baadfbbdb84a205a56d93a | /base/migrations/0018_globalsettings_certificate_template.py | 8c661ae1a7b7c9a9e1194cf64b042d8abf005fc3 | [] | no_license | https://github.com/ISEAGE-ISU/cdc-signup | 35754a192acadad05e7fa132a4e5deec019f2054 | 83a8d3ee4df0d297728ec313312e6a06ef66c662 | refs/heads/master | 2020-05-21T03:23:57.417514 | 2019-09-13T20:59:02 | 2019-09-13T20:59:02 | 23,407,657 | 0 | 4 | null | false | 2014-09-16T21:45:16 | 2014-08-27T22:29:55 | 2014-09-06T06:40:12 | 2014-09-16T21:45:16 | 1,072 | 0 | 0 | 0 | CSS | null | null | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0017_archivedemail_sender'),
]
operations = [
migrations.AddField(
model_name='globalsettings',
name='certificate_template',
field=models.FileField(null=True, upload_to=b'', blank=True),
),
]
| UTF-8 | Python | false | false | 405 | py | 61 | 0018_globalsettings_certificate_template.py | 40 | 0.582716 | 0.57037 | 0 | 19 | 20.315789 | 73 |
Susannnn/Stock-Price-Prediction | 15,994,458,232,130 | 44ee0678a3f94724e6b669b047979a6afb6827ae | 6d52f2044ddbeed2223712d5b233b626784ff605 | /MLP.py | 55550d8a5906a53d5d4e0e2e723f70ad648b544c | [] | no_license | https://github.com/Susannnn/Stock-Price-Prediction | ddf691d01a3a08e06687b653926f5746c57c8917 | 289e07bb3ba4c2cc1ffc6093647194c7b81a42b3 | refs/heads/master | 2021-03-13T12:32:13.297082 | 2020-03-11T21:18:27 | 2020-03-11T21:18:27 | 246,681,752 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 21:03:15 2020
@author: huangjinghua
"""
import math
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import r2_score
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
df = pd.read_csv('S&P500.csv')
#Select the 'Adj Close' price column
data = df.filter(['Adj Close'])
#Make data a np.array
data = data.values
training_data_len = math.ceil( len(data) * .8 )
#Rescale the data
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data)
#Create training set and test set
data_train = scaled_data[0:training_data_len , :]
data_test = scaled_data[training_data_len - 5: , :]
#Build x_train and y_train
x_train = []
y_train = []
for i in range(5, len(data_train)):
x_train.append(data_train[i-5:i, 0])
y_train.append(data_train[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
#Build the MLP model
model = Sequential()
model.add(Dense(10, input_dim=5, activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(1, activation='linear'))
#Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
#Train the model
model.fit(x_train, y_train, batch_size=10, epochs=50)
#Create x_test and y_test
x_test = []
y_test = scaled_data[training_data_len:, 0]
for i in range(5, len(data_test)):
x_test.append(data_test[i-5:i, 0])
#Convert the data to a numpy array
x_test = np.array(x_test)
#Get the models predicted price values
predictions = model.predict(x_test, verbose=1)
#Get the root mean squared error (RMSE)
rmse = np.sqrt(np.mean(((predictions - y_test)**2)))
print ('rmse = ', rmse)
#Get the standard deviation
std = np.std(predictions - y_test)
print ('std = ', std)
#Get R Squared
print ('r2 = ', r2_score(y_test, predictions))
| UTF-8 | Python | false | false | 1,870 | py | 3 | MLP.py | 3 | 0.696257 | 0.673262 | 0 | 80 | 22.3625 | 58 |
walkersaurus/GhostDice | 5,660,766,939,714 | 8cbd329a0cc11deb2125bbd6e5262c54430c062a | 0055682cd7e0e197f8ab71ae089e2abc16864781 | /dice_bot.py | 0b0b1bea603762df0ab8b97a5edee9ec6b18e33a | [] | no_license | https://github.com/walkersaurus/GhostDice | 3030511b8bc26806d6611826f76f7e03fd5f45fb | d81d426296e66c6ef041c1e5201952892070ffb2 | refs/heads/master | 2020-03-27T08:17:33.656350 | 2018-08-27T03:19:20 | 2018-08-27T03:19:20 | 146,240,808 | 0 | 0 | null | false | 2018-08-29T01:27:37 | 2018-08-27T03:11:15 | 2018-08-27T03:19:22 | 2018-08-29T01:27:32 | 5 | 0 | 0 | 1 | Python | false | null | # https://github.com/Rapptz/discord.py/blob/async/examples/reply.py
import discord
import drew_dbot
import re
TOKEN = 'NDc3MzIwODY1OTIzNzI3Mzcz.Dk6k1A.HdpX4joH5qrtLu-O7AgcBwfaQbQ'
client = discord.Client()
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
if message.content.startswith('!hello'):
msg = 'Hello {0.author.mention}'.format(message)
await client.send_message(message.channel, msg)
if message.content.startswith('/roll'):
try:
            # take the first number in the message as the dice count
            qty = [int(n) for n in message.content.split() if n.isdigit()][0]
            if qty < 666:
                # assumes drew_dbot.GhostyMcGhostface takes the dice count and returns the roll result
                msg = 'Rolling ' + str(qty) + ' dice...You got ' + str(drew_dbot.GhostyMcGhostface(qty))
else:
msg = 'That\'s too many dice. I only have 111 arms. Yeah that\'s fifty times more than you have (assuming you have two), but even robots have their limitations.'
except:
msg = "Roll what? That's not even a number."
finally:
await client.send_message(message.channel, msg)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run(TOKEN) | UTF-8 | Python | false | false | 1,181 | py | 3 | dice_bot.py | 2 | 0.684166 | 0.670618 | 0 | 39 | 28.333333 | 165 |
GeminiDRSoftware/DRAGONS | 18,202,071,420,108 | 88cc66e32c4e0ec1ffcad9b9395bc08578ac3563 | de476f64c81636e8b88cfaeaedfb2854bcc88993 | /geminidr/gmos/recipes/qa/recipes_IMAGE.py | e3e28827861879762201b6a8ecc59df5232209cc | [
"BSD-2-Clause"
] | permissive | https://github.com/GeminiDRSoftware/DRAGONS | f07ed39f36a03fd64c5d4e94cc7f0b287bb77719 | 159439b43029d0fd9136e4d30e10fa963d6f9e7f | refs/heads/master | 2023-08-03T08:01:16.407891 | 2023-07-29T02:07:47 | 2023-07-31T23:59:13 | 105,302,305 | 28 | 22 | NOASSERTION | false | 2023-07-06T21:28:08 | 2017-09-29T18:00:10 | 2023-04-28T00:29:24 | 2023-07-06T20:53:12 | 137,493 | 24 | 16 | 50 | HTML | false | false | """
Recipes available to data with tags ['GMOS', 'IMAGE'].
Default is "reduce_nostack".
"""
recipe_tags = {'GMOS', 'IMAGE'}
blocked_tags = {'THRUSLIT'}
def reduce(p):
"""
This recipe performs the standardization and corrections needed to
convert the raw input science images into a stacked image.
QA metrics are being calculated at different point during the reduction.
Parameters
----------
p : PrimitivesBASE object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ()
#p.addIllumMaskToDQ()
p.addVAR(read_noise=True)
p.detectSources()
p.addReferenceCatalog()
p.determineAstrometricSolution()
p.measureIQ(display=True)
p.measureBG()
p.measureCC()
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.flatCorrect()
p.makeFringeForQA()
p.fringeCorrect()
p.mosaicDetectors()
p.detectSources()
p.determineAstrometricSolution()
p.measureIQ(display=True)
p.measureBG()
p.measureCC()
p.addToList(purpose='forStack')
p.getList(purpose='forStack')
p.adjustWCSToReference()
p.resampleToCommonFrame()
p.scaleCountsToReference()
p.stackFrames()
p.detectSources()
p.determineAstrometricSolution()
p.measureIQ(display=True)
p.measureBG()
p.measureCC()
p.writeOutputs()
return
def reduce_nostack(p):
"""
This recipe performs the standardization and corrections needed to
convert the raw input science images into an image ready to be stacked.
QA metrics are being calculated at different point during the reduction.
Parameters
----------
p : PrimitivesBASE object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ()
#p.addIllumMaskToDQ()
p.addVAR(read_noise=True)
p.detectSources()
p.measureIQ(display=True)
p.measureBG()
p.addReferenceCatalog()
p.determineAstrometricSolution()
p.measureCC()
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.flatCorrect()
p.writeOutputs()
p.makeFringeForQA()
p.fringeCorrect()
p.mosaicDetectors()
p.detectSources()
p.measureIQ(display=True)
p.measureBG()
p.determineAstrometricSolution()
p.measureCC()
p.addToList(purpose='forStack')
p.writeOutputs()
return
# we have to use the nostack version for qap because stacking is too slow.
# KL: is this still true with gemini_python 2.0?
# KRA: unknown yet.
_default = reduce_nostack
def stack(p):
"""
This recipe stacks images already reduced up to stacking. It will
collect data marked "forStack", for example the output of
reduce_nostack. The product is a stack of the aligned inputs with
suffix "_stack". QA metrics are measured.
Parameters
----------
p : PrimitivesBASE object
A primitive set matching the recipe_tags.
"""
p.getList(purpose='forStack')
p.adjustWCSToReference()
p.resampleToCommonFrame()
#p.correctBackgroundToReference()
p.scaleCountsToReference()
p.stackFrames()
p.detectSources()
p.measureIQ(display=True)
p.measureBG()
p.determineAstrometricSolution()
p.measureCC()
p.writeOutputs()
return
def makeProcessedFringe(p):
"""
This recipe performs the standardization and corrections needed to
convert the raw input fringe images into a single stacked fringe
image. This output processed fringe is stored on disk using
storeProcessedFringe and has a name equal to the name of the first
input fringe image with "_fringe.fits" appended.
Fringe frames are normally generated with normal science data. There
isn't a keyword identifying raw frames as fringe frames. Therefore
we cannot put this recipe in a set specific to a fringe tag.
Parameters
----------
p : PrimitivesBASE object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ()
#p.addIllumMaskToDQ()
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.flatCorrect()
p.addToList(purpose="forFringe")
p.getList(purpose="forFringe")
p.makeFringeFrame()
p.storeProcessedFringe()
p.writeOutputs()
return
| UTF-8 | Python | false | false | 4,380 | py | 688 | recipes_IMAGE.py | 487 | 0.677169 | 0.676712 | 0 | 160 | 26.375 | 76 |
Dentosal/StackVM | 3,204,045,645,147 | df8e68ac9109b2d12aff8823426ad84bf3bc1f96 | 27b413d86fbbf4e829420fb5bb867aeba1fd55fa | /stackvm/heap.py | 0f3659732e588f8c3d2e47bc89f9f7b7349cfc16 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/Dentosal/StackVM | 5763259405a578b7a344d833b052e36f9a133bef | 655026bb890d52f756d30f340f8b2ad344741175 | refs/heads/master | 2021-01-01T18:04:32.700239 | 2017-07-31T21:12:43 | 2017-07-31T21:12:43 | 98,236,531 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import bisect
class Heap(object):
def __init__(self):
self.__values = {}
self.__sections = []
def get_at(self, address):
assert address in self.__values, "Uninitialized memory access"
return self.__values[address]
def set_at(self, address, value):
assert 0 < value < 2**64
self.__values[address] = value
def clean_at(self, address):
del self.__values[address]
def get_region_at(self, address, count):
return [self.get_at(a) for a in range(address, address + count)]
def set_region_at(self, address, values):
assert all(0 < value < 2**64 for value in values)
for a, v in zip(range(address, address + len(values)), values):
self.set_at(a, v)
def clean_region_at(self, address, count):
for a in range(address, address + count):
self.clean_at(a)
def __reserve(self, start, size):
bisect.insort_left(self.__sections, (start, size))
def allocate(self, size):
"""First-free allocator."""
next_section_start = 0
for s_start, s_size in self.__sections:
space_between = s_start - next_section_start
if space_between >= size:
self.__reserve(s_start, size)
return s_start
next_section_start = s_start + s_size + 1
# no space between sections, allocate from the end of the memory
self.__reserve(next_section_start, size)
return next_section_start
    def resize(self, ptr, newsize):
        # is there enough size to just extend the area?
        for index, (s_start, s_size) in enumerate(self.__sections):
            if s_start == ptr:
                break
        else:
            raise RuntimeError("Invalid pointer passed to Heap.resize")
        inplace = (
            newsize < s_size  # shrinking
            or index == len(self.__sections) - 1  # last section
            or self.__sections[index + 1][0] - s_start > newsize  # enough space before the next section
        )
        if inplace:
            self.__sections[index] = (s_start, newsize)
            return ptr
        else:
            data = self.get_region_at(s_start, s_size)
            del self.__sections[index]
            self.clean_region_at(s_start, s_size)
            newptr = self.allocate(newsize)
            self.set_region_at(newptr, data)
            return newptr
def free(self, ptr):
for index, (s_start, s_size) in enumerate(self.__sections):
if s_start == ptr:
del self.__sections[index]
self.clean_region_at(s_start, s_size)
return
raise RuntimeError("Invalid pointer passed to Heap.free")
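# Example usage (a minimal sketch added for illustration, not part of the original module):
# it exercises the first-free allocator by reserving two blocks, writing values, resizing
# one block and freeing the other. The addresses in the comments follow from the allocation
# strategy above. Run the file directly to try it; importing the module is unaffected.
if __name__ == "__main__":
    heap = Heap()
    a = heap.allocate(4)                 # first block starts at address 0
    b = heap.allocate(2)                 # placed after the first block (with a one-slot gap)
    heap.set_region_at(a, [1, 2, 3, 4])
    heap.set_at(b, 42)
    print(heap.get_region_at(a, 4))      # [1, 2, 3, 4]
    a = heap.resize(a, 8)                # no room to grow in place, so the block is moved
    heap.free(b)
    print(heap.get_at(a))                # 1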
| UTF-8 | Python | false | false | 2,666 | py | 16 | heap.py | 14 | 0.560765 | 0.555139 | 0 | 79 | 32.746835 | 80 |
ed-ortizm/L-G-opt | 15,083,925,144,626 | 241425fdd346389044b47bcbb1e48fe05145ae0f | b1bd390e45e4e177b3e37f33d3182150373f9731 | /GA/ga.py | 43852792c49ce548cb458d6849440749e10a808c | [
"MIT"
] | permissive | https://github.com/ed-ortizm/L-G-opt | 5e4bb00672d7b6c8058232c6d1c608274b6668bc | 59bd3df1cc8db02dd23a9e8a4780844d456cd928 | refs/heads/master | 2021-03-21T07:32:22.962611 | 2020-04-18T00:45:23 | 2020-04-18T00:45:23 | 247,275,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import numpy as np
from math import sin,pi
from scipy.optimize import approx_fprime as p_grad # point gradient of a scalar funtion
#3Dploting
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
## From charbonneau1995: GAs in astronomy and astrophysics
# A top-level view of a genetic algorithm is as follows: given a
# target phenotype and a tolerance criterion,
# 1. Construct a random initial population and evaluate the
# fitness of its members.
# 2. Construct a new population by breeding selected individuals
# from the old population.
# 3. Evaluate the fitness of each member of the new population.
# 4. Replace the old population by the new population.
# 5. Test convergence; unless fittest phenotype matches target
# phenotype within tolerance, goto step 2.
## https://github.com/ahmedfgad/GeneticAlgorithmPython/blob/master/ga.py
def fitness(population,nn=1):
# Fitness value of my individuals
n= nn
fitness = np.zeros(population[:,0].size)
i = 0
for x in population:
f = (16*x[0]*(1-x[0])*x[1]*(1-x[1])*sin(n*pi*x[0])*sin(n*pi*x[1]))**2
fitness[i] = f
i = i+1
return fitness
def mating_pool(population, fitness, n_parents):
    # Return the n_parents fittest individuals, ordered from fittest to least fit
parents = np.zeros((n_parents,2))
for parent in range(n_parents):
max_fit_idx = np.where(fitness==np.max(fitness))
# out: (array([val]),)
max_fit_idx = max_fit_idx[0][0]
parents[parent,:] = population[max_fit_idx,:]
# Now we delete that fitness value to make sure we go for the next one
# during the next iteration
fitness[max_fit_idx] = -1.
return parents
def crossover(parents):
n_offsprings = parents[:,0].size
offsprings = np.zeros((n_offsprings*2+2,2))
for i in range(n_offsprings+1):
# Indexes for the mates
p1_idx = i% parents.shape[0]
p2_idx = (i+1)% parents.shape[0]
p1_x = str(parents[p1_idx][0])[:10]
p1_x = convert(p1_x)
p1_y = str(parents[p1_idx][1])[:10]
p1_y = convert(p1_y)
p1_xy = p1_x + p1_y
p2_x = str(parents[p2_idx][0])[:10]
p2_x = convert(p2_x)
p2_y = str(parents[p2_idx][1])[:10]
p2_y = convert(p2_y)
p2_xy = p2_x + p2_y
# Offspring 1
offsp_1 = p1_xy[0:3] + p2_xy[3:]
offsp_1_x = float('0.' + offsp_1[:8])
offsp_1_y = float('0.' + offsp_1[8:])
# Offspring 2
offsp_2 = p1_xy[3:] + p2_xy[:3]
offsp_2_x = float('0.' + offsp_2[:8])
offsp_2_y = float('0.' + offsp_2[8:])
# Collecting offsprings
offsprings[2*i][0] = offsp_1_x
offsprings[2*i][1] = offsp_1_y
offsprings[2*i+1][0] = offsp_2_x
offsprings[2*i+1][1] = offsp_2_y
return offsprings
def mutation(offsprings, num_mutations=1,p_mut=0.01):
offsprings_mutated = np.zeros(offsprings.shape)
i = 0
for offsp in offsprings:
x = str(offsp[0])
x = convert(x)
y = str(offsp[1])
y = convert(y)
xy = x+y
for mutation in range(num_mutations):
if np.random.random() < p_mut:
                idx = np.random.randint(0, 16)  # the concatenated xy string has 16 characters (indices 0-15)
gene = str(np.random.randint(0,9))
if idx == 0:
xy = gene + xy[1:]
elif idx == 15:
xy = xy[:idx] + gene
else:
xy = xy[0:idx] + gene + xy[idx+1:]
offsprings_mutated[i][0] = float('0.' + xy[:8])
offsprings_mutated[i][1] = float('0.' + xy[8:])
i = i+1
return offsprings_mutated
def convert(x):
# Function to convert the number to a 8 characters string.
if len(x)==10:
x= x[2:]
elif 'e' in x:
aux = ''
aux2= ''
idx_e= x.index('e')
if '.' in x: idx_d= x.index('.')
exp = int(x[idx_e+2:])
if exp == 8:
if '.' in x:
x = x[:idx_d] + x[idx_d+1:idx_e]
for i in range(8-len(x)):
aux = aux + '0'
x = aux + x
else:
for i in range(exp-1):
aux = aux + '0'
x = aux + x[:idx_e]
else:
if '.' in x:
x = x[:idx_d] + x[idx_d+1:idx_e]
for i in range(7-exp):
aux2 = aux2 + '0'
x = x + aux2
for i in range(8-len(x)):
aux = aux + '0'
x = aux + x
else:
x = x[:idx_e]
for i in range(8-exp):
aux2 = aux2 + '0'
x = x + aux2
for i in range(8-len(x)):
aux = aux + '0'
x = aux + x
else:
aux= ''
x = x[2:]
for i in range(8-len(x)):
aux= aux + '0'
x = x + aux
return x
class F_plt:
def __init__(self, n = 1):
self.n = n
def geval(self,x):
# because of the np.meshgrid I get z = [f([x1,:]),f([x2,:]),...]
# Grid evaluation for ploting
X,Y = np.meshgrid(x[0], x[1])
g_f = (16*X*(1-X)*Y*(1-Y)*np.sin(self.n*np.pi*X)*np.sin(self.n*np.pi*Y))**2
return g_f
def plt2(self,population,n_gen,nn,max):
x = np.linspace(0.,1.,1_000)
XY = np.array([x,x]) # new rules, now eval gets one array
X,Y = np.meshgrid(XY[0],XY[1])
z = self.geval(XY)
fig,ax = plt.subplots(1,1)
cp = ax.contourf(X, Y, z)
fig.colorbar(cp)
max = 'Max= ' + str(max)[:5]
plt.scatter(population[:,0],population[:,1], color='r',label=max)
plt.legend()
plt.title("generation: " + str(n_gen) + ', n= ' + str(nn))
plt.xlabel('x')
plt.ylabel('y')
plt.savefig("generation_" + str(n_gen) + '_n_' + str(nn) + '.png')
plt.close()
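# Example run (a minimal sketch added for illustration, not part of the original module):
# one generation of the genetic algorithm on a small random population. Individuals are
# (x, y) pairs in (0, 1); the population size, number of parents and mutation settings
# below are arbitrary choices, not values taken from the original project.
if __name__ == "__main__":
    np.random.seed(0)
    population = np.random.random((8, 2))      # 8 individuals, columns are x and y
    fit = fitness(population, nn=1)            # fitness of every individual
    parents = mating_pool(population, fit, 4)  # keep the 4 fittest individuals
    offsprings = crossover(parents)            # recombine the digit strings of the parents
    new_population = mutation(offsprings, num_mutations=2, p_mut=0.05)
    print(new_population)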
| UTF-8 | Python | false | false | 6,051 | py | 2,211 | ga.py | 7 | 0.513304 | 0.480416 | 0 | 176 | 33.380682 | 87 |
alaamjadi/SAR | 1,451,698,990,840 | 85f5a49799fa04d60b6c71691968dec1ae226c0d | f7cabb1790f9bfe3e9d17a96c0ea36efe3b1fdd5 | /binary_tree.py | 92a5dce57cdea9ca893e5c29b16e2e0fe55a22e0 | [
"MIT"
] | permissive | https://github.com/alaamjadi/SAR | d637aee018d57463f2dbcb942c07fba179a5609d | 63f6e5a734a6e78709cac0d284b966419a904260 | refs/heads/master | 2022-12-20T18:18:19.801962 | 2020-09-24T20:15:56 | 2020-09-24T20:15:56 | 291,810,699 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import network_utils as nu
"""
Container for binary tree nodes
"""
class Node:
value = None
zero = None
one = None
dst_root = None
end = False
node_rules = None
def __init__(self, value="^", end=False):
self.value = value
self.end = end
self.node_rules = []
"""
This method adds second-tier nodes to the tree recursively
"""
def add_dst_nodes(node, dst_rule, index, rule_index):
# Reached the last binary number
if len(dst_rule) <= index :
# We reached the end node, we can choose one of the attributes for assigning "$" sign. I have chosen attribute zero (left) as an indicator, it could be even attribute one (right). If there isn't any we create it.
if node.zero is None:
node.zero = Node("$",end=True)
# If there is already a node zero, change the end attribute to True.
if not node.zero.end :
node.zero.end = True
#node.zero.node_rules = []
# Adding the current rule to this node
node.zero.node_rules.append(rule_index)
return
# Adding the left node
if dst_rule[index] == "0":
# Does this node has a left node? If no, create a new node as left
if node.zero is None:
node.zero = Node(value=(node.value + "0"))
# Continue with adding nodes from the left node
add_dst_nodes(node.zero,dst_rule, index+1, rule_index)
# Adding the right node
else :
# Does this node has a right node? If no, create a new node as right
if node.one is None:
node.one = Node(value=(node.value + "1"))
# Continue with adding nodes from the right node
add_dst_nodes(node.one,dst_rule, index+1, rule_index)
"""
This method adds first-tier nodes to the tree recursively
"""
def add_src_nodes(node, src_rule, index, dst_rule, rule_index):
# Rule.src_sub = * => src_rule = None
if src_rule is None:
src_rule=[]
# src_rule = [] which means src_sub=* or Reached the last binary number
if len(src_rule) == index:
# Checking if it is a root Tier 2 node. If not
if dst_rule is None:
# Is this node already gray (node.end=True)? If no, make the node_rules empty.
if not node.end:
node.node_rules = []
# Making the node.end=True since we reached the end of Tier 1 and this is a gray node
node.end = True
# Adding the current rule to this node
node.node_rules.append(rule_index)
return
# Is this already a root Tier 2 node? If no, add a new node as root Tier2
if node.dst_root is None:
node.dst_root = Node(value="#")
#># Now go for the nodes in Tier 2
add_dst_nodes(node.dst_root, dst_rule, 0, rule_index)
return
# Adding the left node
if src_rule[index] == "0":
# Does this node has a left node? If no, create a new node as left
if node.zero is None:
node.zero = Node(value=(node.value + "0"))
# Continue with adding nodes from the left node
add_src_nodes(node.zero, src_rule, index+1, dst_rule, rule_index)
# Adding the right node
else :
# Does this node has a right node? If no, create a new node as right
if node.one is None:
node.one = Node(value=(node.value + "1"))
# Continue with adding nodes from the right node
add_src_nodes(node.one, src_rule, index+1, dst_rule, rule_index)
"""
Depict the tree
"""
def show(root, indent="", has_zero=False):
last_indent = "|--"
if has_zero:
last_indent = "--"
elif root.value == "^":
indent = ""
last_indent = ""
if not root.end:
print("%s%svalue = %s" % (indent, last_indent, root.value))
else:
print("%s%svalue = %s, rules: %s" % (indent, last_indent, root.value, root.node_rules))
if root.one is not None:
if root.zero is None:
show(root.one, indent + " ")
else:
show(root.one, indent + " |", True)
if root.zero is not None:
if root.dst_root is None:
show(root.zero, indent + " ")
else:
show(root.zero, indent + " |", True)
if root.dst_root is not None:
show(root.dst_root, indent + " ")
"""
Classification Algorithm - Matching the Tier 2 Nodes
"""
def match_dst(node, dst_bin, dst_index, candidate_actions):
# If the end attribute is True, It means we reached the end node in Tier 2, so we add its rules to candidate_actions list
if node.end :
candidate_actions.extend(node.node_rules)
# If we reach the maximum number index, we add its rules to candidate_actions list and return
if dst_index > 32:
if node.zero.value == "$":
candidate_actions.extend(node.zero.node_rules)
return
    # It goes forward with the matching in case of having "0" as binary address. If the value is "$" it means we have reached the end and the node.zero.end is True, so it will be caught in the next round.
if node.zero is not None and (dst_bin[dst_index] == "0" or node.zero.value == "$"):
match_dst(node.zero, dst_bin, dst_index+1, candidate_actions)
# It goes forward with the matching in case of having "1" as binary address.
if node.one is not None and dst_bin[dst_index] == "1":
match_dst(node.one, dst_bin, dst_index+1, candidate_actions)
return
"""
Classification Algorithm - Matching the Tier 1 Nodes, at the end jumps to the matching of Tier 2 nodes
"""
def match_src(node, src_bin, src_index, dst_bin, dst_index, candidate_actions):
# If the end attribute is True, It means we reached the end node in Tier 1, so we add its rules to candidate_actions list
if node.end :
candidate_actions.extend(node.node_rules)
# If dst_root has a value it means it has the reference to root node for Tier 2
if node.dst_root is not None:
match_dst(node.dst_root, dst_bin, dst_index, candidate_actions)
    # It goes forward with the matching in case of having "0" as binary address. If the value is "$" it means we have reached the end and the node.zero.end is True, so it will be caught in the next round.
if node.zero is not None and (src_bin[src_index] == "0" or node.zero.value == "$" ):
match_src(node.zero, src_bin, src_index+1, dst_bin, dst_index, candidate_actions)
# It goes forward with the matching in case of having "1" as binary address.
if node.one is not None and src_bin[src_index] == "1":
match_src(node.one, src_bin, src_index+1, dst_bin, dst_index, candidate_actions)
return
def get_packets_actions(root, packets, all_rules, debug):
# This list contains all the incoming packet match results
actions=[]
# Counter for tracking the matches and no matches among all the incoming packets. The sum of this two is equal to all of the incoming nodes
noMatch = 0
Matched = 0
    # Running the Classification Algorithm for every incoming packet
for packet in packets:
        # This list contains all the possible matches for Tier 1 and Tier 2 (Field 1 and 2)
candidate_actions = []
# Matching the incoming packet with the rule tree for the Tier 1 nodes (Filed 1). At the end it will jump to the Matching for the Tier 2 nodes (Field 2)
match_src(root, packet.src_binary, 0, packet.dst_binary, 0, candidate_actions)
        # This list contains all the possible matches for T1 & T2 and considering protocol and port matches too (Field 3 & Field 4 & Field 5)
final_actions = []
# Checking the candidates of one packet for other fields (protocol and port)
# If there is no match here, it will continue in the loop over candiate_actions, it will check other candidates.
# The outcome of this check is final_actions.
for i in candidate_actions:
if all_rules[i].protocol != '*' and all_rules[i].protocol != packet.protocol:
continue
if not nu.is_in_port_range(all_rules[i].src_port, packet.src_port):
continue
if not nu.is_in_port_range(all_rules[i].dst_port, packet.dst_port):
continue
final_actions.append(i)
# Now we have all the full possible matches in the final_actions list.
# If the list is empty, it means there were no matches.
# If the list is not empty, we have to sort it by rule index number and choose the one with lower priority number.
# In case we have a match
if len(final_actions) != 0:
Matched = Matched + 1
# Sorting the matches and choosing the first one in the list
final_rule = all_rules[sorted(final_actions)[0]]
# Printing out the match result (Rule, Incoming Packet)
""" print(
"Packet>> ".ljust(10) +
"sIP: %s".ljust(20) % (packet.src_ip) +
"dIP: %s".ljust(20) % (packet.dst_ip) +
"protocol: %s".ljust(14) % (packet.protocol) +
"sPort: %s".ljust(12) % (packet.src_port) +
"dPort: %s".ljust(12) % (packet.dst_port) +
"\n" +
"Rule>>".ljust(10) +
"sIP: %s".ljust(20) % (final_rule.src_sub) +
"dIP: %s".ljust(20) % (final_rule.dst_sub) +
"protocol: %s".ljust(14) % (final_rule.protocol) +
"sPort: %s".ljust(12) % (final_rule.src_port) +
"dPort: %s".ljust(12) % (final_rule.dst_port) +
"action: %s".ljust(20) % (final_rule.action) +
"Priority: %s" %(str(sorted(final_actions)[0]+1)) +
"\n") """
# Adding the final match of each packets to the actions list
actions.append(all_rules[sorted(final_actions)[0]].action)
# In case we don't have a match
else:
noMatch = noMatch + 1
#This loop iterates for all the incoming packets.
#
print("%d".ljust(8) %Matched + "packets matched the rules.\n" + "%d".ljust(5) %noMatch + "packets did not match the rules.\n")
return actions
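# Example (a minimal sketch added for illustration, not part of the original module):
# build the two-tier trie for two made-up rules given as bit strings (they are not real
# IP prefixes) and classify one source/destination pair. Rule 0 matches source prefix
# "10" and destination prefix "01"; rule 1 matches any source (None) and destination
# prefix "0", so both rules match the example packet below.
if __name__ == "__main__":
    root = Node()
    add_src_nodes(root, "10", 0, "01", 0)   # rule 0
    add_src_nodes(root, None, 0, "0", 1)    # rule 1 (src_sub = *)
    show(root)
    candidates = []
    match_src(root, "1011", 0, "0110", 0, candidates)
    print(candidates)                       # rule indexes whose prefixes match, e.g. [1, 0]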
| UTF-8 | Python | false | false | 10,228 | py | 47 | binary_tree.py | 9 | 0.602757 | 0.594153 | 0 | 215 | 46.572093 | 220 |
Breee/rocketchat-pollbot | 4,999,341,955,429 | ca015e9caef8037d1921d319350dabd68239d6d6 | c99727a336231adad80d5891f52235c8cd1b2d3b | /Poll.py | d0d24da90c43b66e5f9d21f908ae4d1e2135bfef | [
"MIT"
] | permissive | https://github.com/Breee/rocketchat-pollbot | 4e79a35612180bc072c5777100c103e001bee520 | d8c37ea80b2c9649bfc0cc882da4daf3a0b0bffc | refs/heads/master | 2020-04-02T00:59:39.659534 | 2018-10-30T13:43:44 | 2018-10-30T13:43:44 | 153,831,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
MIT License
Copyright (c) 2018 Breee@github
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from emojistorage import *
import time
class PollCreationException(Exception):
pass
POLL_ID = 0
class Poll(object):
"""
A Poll object, used as parent for SinglePoll and MultiPoll.
"""
def __init__(self, poll_title, vote_options):
global POLL_ID
self.poll_ID = POLL_ID
POLL_ID += 1
self.creation_time = time.time() # timestamp
self.poll_msg = None
self.creation_msg = None
self.creator = None
self.poll_title = poll_title # string
# vote_options of the form [op1,op2,op3...]
self.vote_options = vote_options
# reactions dict is of the form:
# {':regional_indicator_a:': {'usernames': ['testbot', 'Bree']},
# ':regional_indicator_b:': {'usernames': ['testbot']},
# ':regional_indicator_c:': {'usernames': ['testbot']}}
self.reactions = dict()
self.options_to_users = dict()
self.user_to_amount = dict()
self.option_to_reaction = dict()
self.reaction_to_option = dict()
for i, option in enumerate(vote_options):
if i in EmojiStorage.NUMBER_TO_LETTEREMOJI:
reaction = EmojiStorage.NUMBER_TO_LETTEREMOJI[i]
self.option_to_reaction[option] = reaction
self.reaction_to_option[reaction] = option
self.options_to_users[option] = []
def process_reactions(self, botname):
tmp = {key: [] for key in self.options_to_users}
self.options_to_users = tmp
self.user_to_amount = dict()
for reaction, userdict in self.reactions.items():
users = userdict['usernames']
if reaction in EmojiStorage.LETTEREMOJI_TO_NUMBER:
option = self.reaction_to_option[reaction]
for user in users:
if user != botname:
if user not in self.user_to_amount:
self.user_to_amount[user] = 1
self.options_to_users[option].append(user)
elif reaction in EmojiStorage.DEFAULT_PEOPLE_EMOJI_TO_NUMBER:
for user in users:
if user != botname:
if user not in self.user_to_amount:
self.user_to_amount[user] = EmojiStorage.DEFAULT_PEOPLE_EMOJI_TO_NUMBER[reaction]
else:
self.user_to_amount[user] += EmojiStorage.DEFAULT_PEOPLE_EMOJI_TO_NUMBER[reaction]
def create_message(self):
"""
creates message of the form:
TITLE
Reaction1 Option1 [5]
user1 [2], user2 [3]
Reaction2 Option2 [2]
user3[1], user4[1]
:return:
"""
attachments = {"title": self.poll_title, "color": "#ff6644", 'collapsed': False}
msg = "*%s* \n\n" % (self.poll_title)
text = ""
for option, users in self.options_to_users.items():
reaction = self.option_to_reaction[option]
user_string = ""
total = 0
for i in range(len(users)):
user = users[i]
amount = self.user_to_amount[user]
user_string += "%s [%d]" % (user, amount)
total += amount
if i < len(users)-1:
user_string += ", "
msg += "*%s %s [%d]* \n\n %s \n\n " % (reaction, option,total,user_string)
text += "*%s %s [%d]* \n\n %s \n\n " % (reaction, option,total,user_string)
attachments['text'] = text
return msg, [attachments]
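# Example (a minimal sketch added for illustration, not part of the original module):
# build a poll, feed it a reactions dict in the format documented in __init__ and render
# the message. It assumes EmojiStorage maps options 0 and 1 to :regional_indicator_a: /
# :regional_indicator_b:, as the comment in __init__ suggests; "bot" stands in for the
# bot's own username, which is excluded from the tally.
if __name__ == "__main__":
    poll = Poll("Lunch?", ["Pizza", "Ramen"])
    poll.reactions = {
        ':regional_indicator_a:': {'usernames': ['bot', 'alice']},
        ':regional_indicator_b:': {'usernames': ['bot', 'bob']},
    }
    poll.process_reactions("bot")
    msg, attachments = poll.create_message()
    print(msg)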
| UTF-8 | Python | false | false | 4,684 | py | 10 | Poll.py | 8 | 0.591375 | 0.58497 | 0 | 127 | 35.88189 | 110 |
zbut/verilog_tools | 15,590,731,308,093 | a56bddfd28fd35161fc513790cf0c967db33f78c | d92432e150e5eb687da5d7ec96e79cb5c6ac790e | /auto_tb.py | 044169942638d5c233fe8d1a4bad6476d0072073 | [] | no_license | https://github.com/zbut/verilog_tools | 6d75f250193b5311d9e49b409685735ab50c3fb9 | a984519565a749d9e3a4ad0155679b21ca5c192b | refs/heads/master | 2020-04-09T04:31:36.959496 | 2018-12-02T08:13:42 | 2018-12-02T08:13:42 | 160,026,624 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from jinja2 import Environment, PackageLoader
from module_parser import ModuleParser
import os
import logging
import random
logger = logging.getLogger(__name__)
TEMPLATE_PATH = "tb_template.v"
class AutoTb(object):
NOF_RAND_VALUES = 3000
def __init__(self, file_path):
# Parse the verilog file
module_parser = ModuleParser(file_path)
self.parameters = module_parser.get_parameters()
self.inputs = module_parser.get_inputs()
self.outputs = module_parser.get_outputs()
self.inst_name = os.path.splitext(os.path.basename(file_path))[0]
self.tb_name = self.inst_name + "_tb"
# Build a dict from the parameters
parameters_dict = {parameter.name: parameter.default_value for parameter in self.parameters}
self._parameters_dict = {}
for name, default_value in parameters_dict.items():
self._parameters_dict[name] = eval(default_value, {}, parameters_dict)
self.clk_input = self._find_clk_signal()
self.found_clk = self.clk_input is not None
self.rst_input = self._find_rst_signal()
self.found_rst = self.rst_input is not None
self.is_rst_negative = self.rst_input.name.endswith("_n") if self.found_rst else None
self.list_of_input_dicts = []
def _find_clk_signal(self):
for input_wire in self.inputs:
if "clk" in input_wire.name.lower() or "clock" in input_wire.name.lower():
return input_wire
logger.error("Could not find clk signal")
def _find_rst_signal(self):
for input_wire in self.inputs:
if "rst" in input_wire.name.lower() or "reset" in input_wire.name.lower():
return input_wire
logger.error("Could not find rst signal")
def dump_tb_to_file(self, tb_path, include_parser=None):
"""
Dumps a test bench to the given path
:param tb_path: The path to dump the tb to
:param include_parser: Optional IncludeParser object to be used for width evaluation
:return:
"""
eval_dict = dict(self._parameters_dict)
if include_parser is not None:
eval_dict.update(include_parser.get_macros_dict())
for input_wire in self.inputs:
input_wire.update_numeric_width(eval_dict)
for output_wire in self.outputs:
output_wire.update_numeric_width(eval_dict)
self._create_values_for_inputs()
env = Environment(loader=PackageLoader('verilog_tools', 'templates'))
template = env.get_template(TEMPLATE_PATH)
tb_data = template.render(TB=self)
tb_file = open(tb_path, "w")
tb_file.write(tb_data)
def _create_values_for_inputs(self):
for i in range(self.NOF_RAND_VALUES):
inputs_dict = {}
for input_signal in self.inputs:
if (self.found_clk and input_signal.name == self.clk_input.name) or \
(self.found_rst and input_signal.name == self.rst_input.name):
continue
value = random.randrange(0, 1 << input_signal.width_numeric, 1)
inputs_dict[input_signal] = value
self.list_of_input_dicts.append(inputs_dict)
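# Example usage (a sketch with made-up paths, added for illustration only):
#
#     from auto_tb import AutoTb
#     tb = AutoTb("rtl/counter.v")            # parse the module's parameters, inputs and outputs
#     tb.dump_tb_to_file("tb/counter_tb.v")   # render the testbench from tb_template.v
#
# "rtl/counter.v" and "tb/counter_tb.v" are hypothetical paths; an IncludeParser can be
# passed as include_parser= when port widths depend on `include`d macros.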
| UTF-8 | Python | false | false | 3,248 | py | 6 | auto_tb.py | 5 | 0.620074 | 0.617303 | 0 | 76 | 41.710526 | 100 |
joshdsy/python | 18,545,668,819,699 | b12a0f22c2952e88de9e1e2b2dc3db79727f90d6 | a67573b4617b37b96ece777e68e7f25f95b501bd | /exercises/03-seven-segment-display.py | cbe4ed2ad51e06fa0e1b95886aede31be66a5a8d | [] | no_license | https://github.com/joshdsy/python | 06888afa69de00f5e6e2fb780bf351a1d4ef73a1 | 98bfaa3461a8404e664475309169859f4e6ada82 | refs/heads/master | 2017-04-27T12:49:16.396894 | 2016-08-22T10:22:48 | 2016-08-22T10:22:48 | 61,610,076 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python3
# Write a program to convert a list of digits into a 7-segment display readout.
# Eg, given [3, 4, 5, 6], output is:
#
# _ _ _
# _| |_| |_ |_
# _| | _| |_|
# Hint: Store the 7-segment representation of each digit in a list of 3 strings
# with each string representing one line of the number.
#
# eg: three = [ " _ ", " _|", " _|" ]
#
import sys
scan_lines = [
[ " _ ", "| |", "|_|" ], # 0
[ " ", " |", " |" ], # 1
[ " _ ", " _|", "|_ " ], # 2
[ " _ ", " _|", " _|" ], # 3
[ " ", "|_|", " |" ], # 4
[ " _ ", "|_ ", " _|" ], # 5
[ " _ ", "|_ ", "|_|" ], # 6
[ " _ ", " |", " |" ], # 7
[ " _ ", "|_|", "|_|" ], # 8
[ " _ ", "|_|", " |" ], # 9
]
input = int(sys.argv[1])
def int_to_digits(input):
if input == 0:
return []
else:
last_digit = input%10
remaining_number = int(input/10)
remaining_digits = int_to_digits(remaining_number)
return remaining_digits + [last_digit]
print(int_to_digits(input))
for line_number in range(0, 3):
for digit in int_to_digits(input):
print(scan_lines[digit][line_number], end="")
print("")
| UTF-8 | Python | false | false | 1,183 | py | 11 | 03-seven-segment-display.py | 10 | 0.431107 | 0.409129 | 0 | 44 | 25.886364 | 81 |
militska/competitors | 18,322,330,487,284 | c30aa83e39a295cb537b47af8c556e9402bc9960 | a10641cf61c7efbea2d57c206972562fd8b035ed | /modules/weather/Weather.py | b8a38be1405e7756beb964918c8f5cd6bc729d7c | [] | no_license | https://github.com/militska/competitors | 2371f5da597923ef3a5477ac7ee5610c084222ab | da06865abeb14bc9b7e665158b8999119cffa476 | refs/heads/master | 2021-06-29T18:37:44.929268 | 2020-09-18T16:36:53 | 2020-09-18T16:36:53 | 150,991,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import randint
# stub methods, needed to demonstrate the proxy and facade patterns
class Weather:
speed_wind = 0
def __init__(self):
self.speed_wind = randint(0, 40)
def get_speed_wind(self):
return self.speed_wind
def get_random_wind(self):
pass
def get_name_current_wind(self):
pass
def rename_name_current_wind(self):
pass
def get_next_wind(self):
pass
| UTF-8 | Python | false | false | 490 | py | 12 | Weather.py | 10 | 0.611738 | 0.602709 | 0 | 25 | 16.72 | 57 |
foremostxiao/d | 3,951,369,914,749 | 7225606f0e6d685ceb8982d7d845a5be1d3358ce | a38670ee08ea64af33477899a68ee22936f70ce7 | /luffy/第三模块/第6章网络编程/第6章每小节/5 文件传输/上传功能/客户端.py | 0290beac3dbab3f37e047bb3d8ee22b63b144b15 | [] | no_license | https://github.com/foremostxiao/d | 40ed37215f411e8b081a4cb92c8ecbd335cd9d76 | fe80672adc6b2406365b05d5cedd02c6abf66c11 | refs/heads/master | 2020-03-29T13:51:19.589004 | 2018-09-23T09:29:56 | 2018-09-23T09:29:56 | 149,985,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import struct
import json
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from db import settings
def run():
phone=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
phone.connect(('127.0.0.1',9900))
while True:
        # 1. Send the command
cmd = input('>>: ').strip() # get 3.jpeg
if not cmd: continue
cmds = cmd.split()
filename = cmds[1]
try:
header_dic = {
'filename': filename,
'md5': 'xxdxxx',
'file_size': os.path.getsize(os.path.join(settings.path_client, filename))
}
header_json = json.dumps(header_dic)
header_bytes = header_json.encode('utf-8')
            phone.send(struct.pack('i', len(header_bytes))) # len(header_bytes): byte length of the header being sent
            phone.send(header_bytes) # the client sends twice: the header, then the file data
with open(os.path.join(settings.path_client, filename), 'rb') as f:
for line in f:
phone.send(line)
        except ConnectionResetError: # applies to the Windows operating system
break
phone.close()
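# Sketch of the matching receive side (an illustration only; the real server is a separate
# script that is not shown here). The wire format used above is: a 4-byte struct-packed
# header length, then the JSON header, then the raw file bytes.
#
#     header_len = struct.unpack('i', conn.recv(4))[0]
#     header = json.loads(conn.recv(header_len).decode('utf-8'))
#     received = 0
#     with open(header['filename'], 'wb') as f:
#         while received < header['file_size']:
#             chunk = conn.recv(1024)
#             f.write(chunk)
#             received += len(chunk)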
if __name__ == '__main__':
run() | UTF-8 | Python | false | false | 1,248 | py | 165 | 客户端.py | 142 | 0.558081 | 0.545455 | 0 | 38 | 30.289474 | 93 |
libmonsoon-dev/instabot | 5,798,205,872,973 | ac778c855177d3065447e24c396ce35f5e3dca8b | 29e11923c0d8b264190ead20a41a85dad6df0c59 | /src/modules/types.py | 65ba3bd85d59510c24fac1cf4e069c089fcc5854 | [] | no_license | https://github.com/libmonsoon-dev/instabot | 3c0ff47dcec604ce89017d19d4de7f6fe58dc40f | 0c89ddf40d1b5250104477f933daf1431011675b | refs/heads/master | 2020-04-23T08:32:44.305114 | 2019-02-17T21:16:19 | 2019-02-17T21:16:19 | 171,040,709 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import *
Mode = Union['unfollow']
HttpMethod = Union['POST', 'GET', 'PUT', 'DELETE']
| UTF-8 | Python | false | false | 99 | py | 9 | types.py | 6 | 0.646465 | 0.646465 | 0 | 5 | 18.8 | 50 |
pkk0/Take-a-break-MacOS | 11,982,958,783,339 | 66f521c313ffe004e33ac5281be76da7265f62aa | 231c28f519e4d7eb72fc03cf5e3f76bbd8bdcd36 | /application.py | 2f8d5df38aa161d9007cd80eb6316a542ac3b697 | [] | no_license | https://github.com/pkk0/Take-a-break-MacOS | 0d8fda3dc7d8e14f3609e8c52e29f1d3b651bdad | 3bc282100e4c3fb918e4afcc947a23eb1f2e0cfb | refs/heads/main | 2023-06-03T08:29:08.994089 | 2021-06-23T11:40:31 | 2021-06-23T11:40:31 | 376,577,045 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import rumps
from datetime import datetime, timedelta
from enum import Enum
from configparser import ConfigParser
import global_memory
from system_wakes_daemon import SystemWakesDaemon
# Consts
APP_NAME = 'Take a break'
CONFIG_FILE = 'config.INI'
BREAK_NOTIFICATION_MESSAGE = 'It\'s time for a little break!'
CMD_TIME_FORMAT = '%Y-%m-%d'
# Application states
class State(Enum):
WORK = 0
BREAK = 1
class Application(rumps.App):
def __init__(self, config):
super().__init__(APP_NAME)
self.state_seconds_counter = 0
self.state = State.WORK
self.start_datetime = datetime.now()
self.config = config
def calculate_current_state_seconds_left(self):
if self.state == State.WORK:
return float(self.config['DEFAULT']['WORK_TIME']) * 60 - self.state_seconds_counter + 1
elif self.state == State.BREAK:
return float(self.config['DEFAULT']['BREAK_TIME']) * 60 - self.state_seconds_counter + 1
def restart_to_state(self, state):
self.state_seconds_counter = 0
self.state = state
def tick(self):
self.state_seconds_counter += 1
seconds_left = self.calculate_current_state_seconds_left()
if self.state == State.WORK:
self.title = f"🧑🏻💻 [W] {format_seconds(seconds_left)}"
if not seconds_left:
self.restart_to_state(State.BREAK)
rumps.notification('', BREAK_NOTIFICATION_MESSAGE, '')
else:
self.title = f"💤 [B] {format_seconds(seconds_left)}"
if not seconds_left:
self.restart_to_state(State.WORK)
local_last_system_wake = global_memory.last_system_wake # thread safe
if local_last_system_wake and local_last_system_wake > self.start_datetime:
self.restart_to_state(State.WORK)
self.start_datetime = local_last_system_wake
@rumps.timer(1)
def update(self, _):
self.tick()
@rumps.clicked('Restart work timer')
def restart(self, _):
self.restart_to_state(State.WORK)
def format_seconds(s):
return ":".join(str(timedelta(seconds=s)).split(':')[1:])
def read_config():
config = ConfigParser()
config.read(CONFIG_FILE)
return config
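# Expected config.INI layout (inferred from the keys read in Application; the values
# below are illustrative, times are in minutes):
#
#     [DEFAULT]
#     WORK_TIME = 50
#     BREAK_TIME = 10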
def run():
config = read_config()
SystemWakesDaemon(config).start()
Application(config).run()
if __name__ == "__main__":
run()
| UTF-8 | Python | false | false | 2,459 | py | 7 | application.py | 4 | 0.617587 | 0.61227 | 0 | 88 | 26.784091 | 100 |
ApolloNegar/SimpleNotebook | 360,777,277,006 | 44fa5f17e2207b4699daa1024bd16734fcb9d6af | f99ee7ad84f39547dfbf0fa6e3c28f8587b05c2c | /notes/views.py | 5afd8ba85c286bbd506303fb7dc1e30c1788b7e0 | [] | no_license | https://github.com/ApolloNegar/SimpleNotebook | b4146db89711957014a975565a96ddb73aa72423 | 65938ebf3fa05e556cc2b3f1703e52fdbcd66698 | refs/heads/master | 2022-12-01T03:38:20.576478 | 2020-01-31T18:10:04 | 2020-01-31T18:10:04 | 208,629,762 | 0 | 0 | null | false | 2022-11-22T04:33:26 | 2019-09-15T17:23:14 | 2020-01-31T18:11:20 | 2022-11-22T04:33:26 | 49 | 0 | 0 | 3 | Python | false | false | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpRequest, Http404
from django.shortcuts import render, redirect, get_object_or_404
from django.db import models
from django.utils import timezone
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from .models import Post
# Create your views here.
def notes(request):
context = {'posts': Post.objects.all()}
return render(request, 'notes/notes.html', context)
class UserPostListView(ListView):
model = Post
template_name = 'notes/table_notes.html'
context_object_name = "posts"
paginate_by = 10
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('order')
class UserPostDetailView(DetailView):
model = Post
class UserUpdateView(
LoginRequiredMixin,
UserPassesTestMixin,
UpdateView): # a view with a form, when we update a post
model = Post
fields = ['title', 'content']
success_url = '/user-direct/'
def form_valid(self, form):
form.instance.author = self.request.user
form.save()
now = timezone.now()
form.instance.date_modified = now
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class UserDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/reorder/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
def user_direct(request):
return redirect('user-posts', request.user.username)
@login_required
def post_order_change(request, **kwargs):
if request.method == 'POST':
entered_order = request.POST.get('order') # goal order
p_1 = Post.objects.get(author=request.user, order=kwargs['order']) # the current order of the post
p_2 = Post.objects.get(author=request.user, order=int(entered_order)) # target post
p_2.order = int(kwargs['order'])
p_2.save()
p_1.order = int(entered_order)
p_1.save()
else:
raise Http404
context = {'user': request.user}
return render(request, 'notes/post_order.html', context)
def reorder(request):
total_count = Post.objects.filter(author=request.user).count()
t = total_count
while 0 < t:
for p in Post.objects.filter(author=request.user):
p.order = t
p.save()
t -= 1
return redirect('/user/{}'.format(request.user))
class UserCreateView(LoginRequiredMixin, CreateView): # a view with a form, when we create a new post
model = Post
fields = ['title', 'content']
success_url = '/user-direct/'
def form_valid(self, form):
form.instance.author = self.request.user
form.save()
total_count = Post.objects.filter(author=self.request.user).count()
print(total_count)
t = total_count
if t == 1:
form.instance.order = 1
else:
while 1 < t:
for p in Post.objects.filter(author=self.request.user):
p.order = t
p.save()
t -= 1
form.instance.order = 1
return super().form_valid(form)
| UTF-8 | Python | false | false | 3,743 | py | 21 | views.py | 12 | 0.622762 | 0.615282 | 0 | 144 | 24.993056 | 107 |
dicaso/lostdata | 16,372,415,376,591 | d2901152c8e4ca15557ec13d4798d889a5024763 | ae1f6d736c1560f35076c127474918554226e34b | /lostdata/config.py | a3a4a5dc31bb976b9eb97f6528cb1199ce7c9985 | [] | no_license | https://github.com/dicaso/lostdata | 6787a74ae1c74c24f8cdad602cfeede2cc81e36c | ff3e8f5d24ce01424fd7c03033e422954d3c7e44 | refs/heads/master | 2021-06-22T08:27:05.657960 | 2020-12-16T13:02:19 | 2020-12-16T13:02:19 | 142,540,758 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""LSD configuration
This module organizes the configuration for the lostdata package
Todo:
* Give example symlinking privatedir ln -s ~/Dropbiz/Lab/z_archive/Datasets ~/LSData/private
"""
import configparser, os, appdirs
configdir = appdirs.AppDirs('lostdata').user_config_dir
appdatadir = appdirs.AppDirs('lostdata').user_data_dir
# Check existance of app dirs
if not os.path.exists(configdir):
os.makedirs(configdir)
print('created lostdata configdir', configdir)
if not os.path.exists(appdatadir):
os.makedirs(appdatadir)
print('created lostdata data dir', appdatadir)
configFileOptions = [
'lsd.cfg', # in current working dir
os.path.join(configdir, 'lsd.cfg')
]
# Default configuration
config = configparser.ConfigParser()
config['LSD'] = {
'cachetime': '4w', #supports w[eeks], d[ays] or h[ours]
'cachedir': os.path.join(appdatadir, 'cache'),
'privatedir': os.path.join(appdatadir, 'private')
}
# Read configuration file
config.read(configFileOptions)
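# Example lsd.cfg overriding the defaults above (values are illustrative only):
#
#     [LSD]
#     cachetime = 2w
#     cachedir = /data/lsd/cache
#     privatedir = /data/lsd/private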
# Check cache and privatedir
if not os.path.exists(config['LSD']['cachedir']):
os.makedirs(config['LSD']['cachedir'])
print('created lostdata cache dir', config['LSD']['cachedir'])
if not os.path.exists(config['LSD']['privatedir']):
os.makedirs(config['LSD']['privatedir'])
print('created lostdata private dir', config['LSD']['privatedir'])
# Secrets: config for storing user API keys and other sensitive/personal information
from kindi import Secrets
secrets = Secrets(default_section=__package__)
def makeLSDataLinks():
if not os.path.exists(os.path.expanduser('~/LSData')):
os.makedirs(os.path.expanduser('~/LSData'))
os.symlink(
config['LSD']['cachedir'],
os.path.expanduser('~/LSData/cache/')
)
os.symlink(
config['LSD']['privatedir'],
os.path.expanduser('~/LSData/private/')
)
| UTF-8 | Python | false | false | 1,893 | py | 25 | config.py | 24 | 0.692552 | 0.691495 | 0 | 61 | 30.032787 | 96 |
kbm1422/husky | 13,838,384,665,732 | a6f93aba5ed7234f3aaf087175e9c56812f1d03d | 436824f1eb892e8011d4bbc497963a7d9b9dbd7d | /.svn/pristine/9e/9e660d6a69c69220d298372ea8b722ee5f802cf3.svn-base | 209b79333947c8b2953901d807281eabe004440a | [] | no_license | https://github.com/kbm1422/husky | 93e1d5d135245c97066d988432fa347f591b005f | 82493f34a14de0724697c3d9484de89cae2776f0 | refs/heads/master | 2020-05-17T15:49:59.541073 | 2015-04-13T09:12:19 | 2015-04-13T09:12:19 | 33,658,291 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import errno
import random
import socket
def get_tcpport_not_in_use(a=40000, b=65535):
logger.info("random a tcpport not in use, range from %d to %d", a, b)
while True:
sock = socket.socket()
port = random.randint(a, b)
addr = ("0.0.0.0", port)
try:
sock.bind(addr)
except socket.error as err:
if err.errno == errno.EADDRINUSE:
logger.debug("addr <%d:%d> is in use, random another port", a, b)
continue
else:
raise
else:
return port
finally:
sock.close() | UTF-8 | Python | false | false | 725 | 213 | 9e660d6a69c69220d298372ea8b722ee5f802cf3.svn-base | 163 | 0.529655 | 0.508966 | 0 | 30 | 23.2 | 81 |
|
RoxanaTesileanu/Py_for_kids | 13,967,233,664,653 | 95e025f9c688a41e505e037bd4c7e188b82e8655 | 90edbc51ed95545b83377fa95f1dccc68a917896 | /py_for_kids_ch5.py | 14ab78431c12f2e6fd805292d435ceb8a1263924 | [] | no_license | https://github.com/RoxanaTesileanu/Py_for_kids | f6c5c003a2a424d3a52a8721d4bfd6f6df2ca7e1 | fce3704d73501554de99425b91e1a906dbaae91e | refs/heads/master | 2021-01-12T15:44:08.676521 | 2016-10-28T10:07:49 | 2016-10-28T10:07:49 | 71,873,596 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | Python 2.7.12 (default, Jul 1 2016, 15:12:24)
[GCC 5.4.0 20160609] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> # questions = conditions
>>> # we combine these conditions and the responses into if statements
>>>
>>> age= 13
>>> if age> 20 :
print ('You\'r too old')
>>> age=23
>>> if age> 20 :
print ('You\'r too old')
You'r too old
>>> # if the answer is true the commands in the block will be run
>>> # a block is a group of programming statements
>>> age=25
>>> if age > 20 :
print('You are too old!')
print ('Why are you here?')
print ('Why aren\'t you mowing a lawn or sorting papers')
You are too old!
Why are you here?
Why aren't you mowing a lawn or sorting papers
>>> if age > 20 :
print('You are too old!')
print ('Why are you here?')
print ('Why aren\'t you mowing a lawn or sorting papers')
You are too old!
Why are you here?
Why aren't you mowing a lawn or sorting papers
>>> if age > 20 :
print('You are too old!')
print ('Why are you here?')
print ('Why aren\'t you mowing a lawn or sorting papers')
File "<pyshell#20>", line 4
print ('Why aren\'t you mowing a lawn or sorting papers')
^
IndentationError: unexpected indent
>>>
>>> age=10
>>> if age>10 :
print('you are too old for my jokes')
>>> age =1-
SyntaxError: invalid syntax
>>> age =10
>>> if age >= 10 :
print ('you are too old for my jokes')
you are too old for my jokes
>>> age=10
>>> if age == 10 :
print ('What\'s brown and sticky? A stick!!')
What's brown and sticky? A stick!!
>>> print ('want to hear a dirty joke?')
want to hear a dirty joke?
>>> age =12
>>> if age == 12 :
print ('a pig fell in the mud')
else :
print ("shh. it's a secret")
a pig fell in the mud
>>> age=8
>>> if ager ==12 :
print ('a pig fell in the mud')
else :
print ('shh. it\'s a secret')
Traceback (most recent call last):
File "<pyshell#47>", line 1, in <module>
if ager ==12 :
NameError: name 'ager' is not defined
>>> if age ==12 :
print ('a pig fell in the mud')
else :
print ('shh. it\'s a secret')
shh. it's a secret
>>> age=12
>>> if age ==10 :
print ('what do you call an unhappy cranberry?)
SyntaxError: EOL while scanning string literal
>>> if age ==10 :
print ('what do you call an unhappy cranberry?')
print('a blueberry!')
elif age==11 :
print ('what did the green grape say tp the blue grape?')
print('breathe! breath!')
elif age ==12 :
print ('what did 0 say to 8?')
print ('hi guys!')
elif age ==13 :
print ('why wasn\'t 10 afraid of 7?')
print ('because rather than eating 9, 78 pi')
else :
print ('huh?')
what did 0 say to 8?
hi guys!
>>> if age ==10 or age==11 or age==12 or age==13 :
print('what is 10 + 11+12+13? A mess!')
else :
print ('huh?')
what is 10 + 11+12+13? A mess!
>>> if age >=10 and age<=13 :
print ('what is 10+13? A mess!')
else :
print ('huh?')
what is 10+13? A mess!
>>> age
12
>>> myval=None
>>> print(myval)
None
>>> # empty value
>>> if myval==None :
print ('the variable myval doesn\'t have a value')
the variable myval doesn't have a value
>>>
>>> age
12
>>> if age ==10 :
print ('what\'s the best way to speak to a monster?')
print ('from as far away as possible!')
>>> age=10
>>> if age ==10 :
print ('what\'s the best way to speak to a monster?')
print ('from as far away as possible!')
what's the best way to speak to a monster?
from as far away as possible!
>>> age='10'
>>> converted_age=int(age)
>>> age=10
>>> converted_age=str(age)
>>> print age
10
>>> age
10
>>> age='10'
>>> converted_age=int(age)
>>> if converted_age==10 :
print ('what\'s the best way to speak to a monster?')
print ('from as far away as possible!')
what's the best way to speak to a monster?
from as far away as possible!
>>> age='10.5'
>>> converted_age=int(age)
Traceback (most recent call last):
File "<pyshell#105>", line 1, in <module>
converted_age=int(age)
ValueError: invalid literal for int() with base 10: '10.5'
>>> converted_age=float(age)
>>> converted_age
10.5
>>> age='ten'
>>> converted_age=int(age)
Traceback (most recent call last):
File "<pyshell#109>", line 1, in <module>
converted_age=int(age)
ValueError: invalid literal for int() with base 10: 'ten'
>>> cakes=250
>>> if cakes <=100 or cakes>=500 :
print ('too few or too many')
else :
print ('just right')
just right
>>> cakes = 600
>>> if cakes <=100 or cakes >=500 :
print ('too few or too many')
else :
print ('just right')
too few or too many
>>> cakes =600
>>> if cakes <=1000 or cakes >= 5000 :
print ('too few or too many')
else :
print ('just right')
too few or too many
>>> ninjas=5
>>> if ninjas < 10 :
print ('I can fight those ninjas')
elif ninjas <30 :
print ('It will be a struggle')
elif ninjas < 50 :
print ('That\'s too many!')
I can fight those ninjas
>>> ninjas=15
>>> if ninjas < 10 :
print ('I can fight those ninjas')
elif ninjas <30 :
print ('It will be a struggle')
elif ninjas < 50 :
print ('That\'s too many!')
It will be a struggle
>>>
| UTF-8 | Python | false | false | 5,038 | py | 19 | py_for_kids_ch5.py | 18 | 0.626836 | 0.583565 | 0 | 239 | 20.075314 | 70 |
ytree-project/ytree | 15,367,393,005,077 | 782c0af1038d8b8927edbe30d369f005e305dca2 | 4c0e66f73a2a798e07fbb1aadddf49b20cc7a578 | /ytree/analysis/analysis_pipeline.py | 7fd38eb43c181c72e269e620b446fd35c27af51f | [
"BSD-3-Clause"
] | permissive | https://github.com/ytree-project/ytree | df39beae6e5e83ab6ffc122ddcb2c582d6189161 | 57d379f85048372d75fb197166289efd67a2e829 | refs/heads/main | 2023-05-27T17:36:01.765797 | 2023-05-23T08:56:27 | 2023-05-23T08:56:27 | 98,564,214 | 6 | 6 | NOASSERTION | false | 2023-05-23T08:56:29 | 2017-07-27T17:37:08 | 2023-05-20T08:07:08 | 2023-05-23T08:56:28 | 3,275 | 15 | 9 | 9 | Python | false | false | """
AnalysisPipeline class and member functions
"""
import os
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only
from ytree.analysis.analysis_operators import AnalysisOperation
from ytree.utilities.io import ensure_dir
class AnalysisPipeline:
"""
Initialize an AnalysisPipeline.
An AnalysisPipeline allows one to create a workflow of analysis to be
performed on a node/halo in a tree. This is done by creating functions
that minimally accept a node as the first argument and providing these
to the AnalysisPipeline in the order they are meant to be run. This
makes it straightforward to organize an analysis workflow into a series
of distinct, reusable functions.
Parameters
----------
output_dir : optional, str
Path to a directory into which any files will be saved. The
directory will be created if it does not already exist.
Examples
--------
>>> import ytree
>>>
>>> def my_analysis(node):
... node["test_field"] = 2 * node["mass"]
>>>
>>> def minimum_mass(node, value):
... return node["mass"] > value
>>>
>>> def my_recipe(pipeline):
... pipeline.add_operation(my_analysis)
>>>
>>> def do_cleanup(node):
... print (f"End of analysis for {node}.")
>>>
>>> a = ytree.load("arbor/arbor.h5")
>>>
>>> ap = AnalysisPipeline()
>>> # don't analyze halos below 3e11 Msun
>>> ap.add_operation(minimum_mass, 3e11)
>>> ap.add_recipe(my_recipe)
>>> ap.add_recipe(do_cleanup, always_do=True)
>>>
>>> trees = list(a[:])
>>> for tree in trees:
... for node in tree["forest"]:
... ap.process_target(node)
>>>
>>> a.save_arbor(trees=trees)
"""
def __init__(self, output_dir=None):
self.actions = []
if output_dir is None:
output_dir = "."
self.output_dir = ensure_dir(output_dir)
self._preprocessed = False
def add_operation(self, function, *args, always_do=False, **kwargs):
"""
Add an operation to the AnalysisPipeline.
An operation is a function that minimally takes in a target object
and performs some actions on or with it. This function may alter the
object's state, attach attributes, write out data, etc. Operations
are used to create a pipeline of actions performed in sequence on a list
of objects, such as all halos in a merger tree. The function can,
optionally, return True or False to act as a filter, determining if the
rest of the pipeline should be carried out (if True) or if the pipeline
should stop and move on to the next object (if False).
Parameters
----------
function : callable
The function to be called for each node/halo.
*args : positional arguments
Any additional positional arguments to be provided to the funciton.
always_do: optional, bool
If True, always perform this operation even if a prior filter has
returned False. This can be used to add house cleaning operations
that should always be run.
Default: False
**kwargs : keyword arguments
Any keyword arguments to be provided to the function.
"""
if not callable(function):
raise ValueError("function argument must be a callable function.")
operation = AnalysisOperation(function, *args, always_do=always_do, **kwargs)
self.actions.append(operation)
def add_recipe(self, function, *args, **kwargs):
"""
Add a recipe to the AnalysisPipeline.
An recipe is a function that accepts an AnalysisPipeline and adds a
series of operations with calls to add_operation. This is a way of
creating a shortcut for a series of operations.
Parameters
----------
function : callable
A function accepting an AnalysisPipeline object.
*args : positional arguments
Any additional positional arguments to be provided to the funciton.
**kwargs : keyword arguments
Any keyword arguments to be provided to the function.
Examples
--------
>>> def print_field_value(node, field):
... print (f"Node {node} has {field} of {node[field]}.")
>>>
>>> def print_many_things(pipeline, fields):
... for field in fields:
... pipeline.add_operation(print_field_value, field)
>>>
>>> ap = ytree.AnalysisPipeline()
>>> ap.add_recipe(print_many_things, ["mass", "virial_radius"])
"""
if not callable(function):
raise ValueError("function argument must be a callable function.")
recipe = AnalysisOperation(function, *args, **kwargs)
recipe(self)
@parallel_root_only
def _preprocess(self):
"Create output directories and do any other preliminary steps."
if self._preprocessed:
return
for action in self.actions:
my_output_dir = action.kwargs.get("output_dir")
if my_output_dir is not None:
new_output_dir = ensure_dir(
os.path.join(self.output_dir, my_output_dir))
action.kwargs["output_dir"] = new_output_dir
self._preprocessed = True
def process_target(self, target):
"""
Process a node through the AnalysisPipeline.
All operations added to the AnalysisPipeline will be run on the
provided target.
Parameters
----------
target : :class:`~ytree.data_structures.tree_node.TreeNode`
The node on which to run the analysis pipeline.
"""
self._preprocess()
target_filter = True
for action in self.actions:
if target_filter or action.always_do:
rval = action(target)
if rval is not None:
target_filter &= bool(rval)
return target_filter
| UTF-8 | Python | false | false | 6,123 | py | 118 | analysis_pipeline.py | 90 | 0.607709 | 0.606402 | 0 | 178 | 33.398876 | 86 |
hbc/li_hiv_call3 | 549,755,858,408 | cde6c14a8bfd8299ccbf7619dbee177fbcdc7c44 | 9b45e8b8536a0ec1b6455c12bf6db8574687294a | /scripts/call_v3_loop.py | 77cfa2ae87215a402eee14273dfaea04a1a2a25f | [] | no_license | https://github.com/hbc/li_hiv_call3 | 14ac3700d426d840e1f44410fd613562a6759e27 | 9847d0a13f4facb885d5d713b6c36163bb39e0d1 | refs/heads/master | 2020-12-24T02:36:24.596873 | 2016-07-18T00:42:18 | 2016-07-18T00:42:18 | 29,584,225 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""Collapse reads on V3 loop and organize for calling.
"""
import os
import subprocess
import sys
import yaml
def main(config_file, bam_file):
with open(config_file) as in_handle:
config = yaml.safe_load(in_handle)
#v3_region = _read_regions_file(config["regions"])
v3_region = "HIVHXB2CG:7110-7217"
[fq1, fq2], [full_fq1, full_fq2] = _select_fastq_in_region(bam_file, v3_region)
merged_fq = _merge_fastq(fq1, fq2, config)
print calculate_kmers(merged_fq)
print align_to_control(full_fq1, full_fq2, config)
def align_to_control(fq1, fq2, config):
"""Align fastqs to control file, counting hits per input.
"""
ref_file = config["controls"]["V3-Loop"]
if not os.path.exists(ref_file + ".bwt"):
cmd = "bwa index {ref_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
out_file = "%s-v3_counts.txt" % os.path.splitext(fq1)[0]
if not os.path.exists(out_file):
cmd = ("bwa mem {ref_file} {fq1} {fq2} | samtools view -F 4 - | cut -f 3 | sort | uniq -c > {out_file}")
subprocess.check_call(cmd.format(**locals()), shell=True)
return out_file
def calculate_kmers(merged_fq):
"""Use jellyfish to count kmers in the input file.
"""
jf_file = "%s.jf" % os.path.splitext(merged_fq)[0]
if not os.path.exists(jf_file):
cmd = "jellyfish count -s 100M -m 105 -o {jf_file} {merged_fq}"
subprocess.check_call(cmd.format(**locals()), shell=True)
kmer_file = "%s.kmer" % os.path.splitext(merged_fq)[0]
if not os.path.exists(kmer_file):
cmd = "jellyfish dump -c -t -L 100 {jf_file} | sort -k 2nr > {kmer_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
return kmer_file
def _read_regions_file(in_file):
with open(in_file) as in_handle:
for line in in_handle:
contig, start, end, name = line.strip().split()
if name.lower().startswith("v3"):
return "%s:%s-%s" % (contig, start, end)
def _merge_fastq(fq1, fq2, config):
"""Extract paired end reads and merge using pear.
"""
base_out = os.path.splitext(fq1)[0].replace("-1", "-pear")
merged_out = "%s.assembled.fastq" % base_out
if not os.path.exists(merged_out):
cmd = ["pear", "-q", str(config["params"]["pear"]["quality_thresh"]),
"-v", str(config["params"]["pear"]["min_overlap"]),
"-t", str(config["params"]["pear"]["min_trim_length"]),
"-u", str(config["params"]["pear"]["max_uncalled_base"]),
"-f", fq1, "-r", fq2, "-o", base_out]
subprocess.check_call(cmd)
return merged_out
def _select_fastq_in_region(bam_file, region):
"""Extract paired end reads where either read overlaps the region of interest.
"""
work_dir = os.path.join(os.getcwd(), "v3_fastq")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
full_fq1, full_fq2 = _select_full_fastqs(bam_file, work_dir)
base, ext = os.path.splitext(os.path.basename(bam_file))
base = os.path.join(work_dir, base)
region_str = region.replace(":", "_").replace("-", "_")
fq1 = "%s-%s-1.fastq" % (base, region_str)
fq2 = "%s-%s-2.fastq" % (base, region_str)
name_file = "%s-%s-names.txt" % (base, region_str)
keep_file = "%s-keep%s" % os.path.splitext(name_file)
if not os.path.exists(keep_file):
cmd = ("samtools view {bam_file} {region} | cut -f 1 | sort | uniq > {name_file}")
subprocess.check_call(cmd.format(**locals()), shell=True)
with open(name_file) as in_handle:
with open(keep_file, "w") as out_handle:
for line in in_handle:
out_handle.write("%s/1\n" % line.strip())
out_handle.write("%s/2\n" % line.strip())
for orig_file, out_file in [(full_fq1, fq1), (full_fq2, fq2)]:
if not os.path.exists(out_file):
cmd = "seqtk subseq {orig_file} {keep_file} > {out_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
return [fq1, fq2], [full_fq1, full_fq2]
def _select_full_fastqs(bam_file, work_dir):
base, ext = os.path.splitext(os.path.basename(bam_file))
base = os.path.join(work_dir, base)
full_fq1 = "%s-1.fastq" % base
full_fq2 = "%s-2.fastq" % base
if not os.path.exists(full_fq1):
cmd = ("bamtofastq F={full_fq1} F2={full_fq2} S=/dev/null O=/dev/null O2=/dev/null collate=1 "
"filename={bam_file}")
subprocess.check_call(cmd.format(**locals()), shell=True)
return full_fq1, full_fq2
if __name__ == "__main__":
main(*sys.argv[1:]) | UTF-8 | Python | false | false | 4,655 | py | 19 | call_v3_loop.py | 14 | 0.593985 | 0.576369 | 0 | 108 | 42.111111 | 112 |
linfengzhou/LeetCode | 12,481,175,006,060 | dd6b491dbd416493a6c275bcd82cdbf42677317d | aeeaf40350a652d96a392010071df8a486c6e79f | /archive/python/Python/binary_tree/98.validate-binary-search-tree.py | ad8ead0f576bbd1e3344c4b6e42581c2091ba978 | [
"MIT"
] | permissive | https://github.com/linfengzhou/LeetCode | 11e6c12ce43cf0053d86437b369a2337e6009be3 | cb2ed3524431aea2b204fe66797f9850bbe506a9 | refs/heads/master | 2021-01-23T19:34:37.016755 | 2018-04-30T20:44:40 | 2018-04-30T20:44:40 | 53,916,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
balance, min_value, max_value = self.helper(root)
return balance
def helper(self, root):
if not root:
return True, sys.maxint, -sys.maxint
left_balance, left_min, left_max = self.helper(root.left)
right_balance, right_min, right_max = self.helper(root.right)
if (left_balance and right_balance)\
and (root.val > left_max)\
and (root.val < right_min):
return True, min(root.val, left_min), max(right_max, root.val)
return False, -1, -1
| UTF-8 | Python | false | false | 869 | py | 230 | 98.validate-binary-search-tree.py | 214 | 0.55351 | 0.551208 | 0 | 32 | 26.15625 | 74 |
pcieslinski/log_parser | 16,209,206,580,278 | 2906dbf9c9d3aede86344c746882e80250f529f4 | d5cf8601eee96e0570795324d4cf257f9c27ec97 | /log_parser/renderers/__init__.py | cbcfd2fca67e049ff9069911901fa963f1f73b71 | [] | no_license | https://github.com/pcieslinski/log_parser | 13f20ea2a75455a026c372cb60c365abac50fb20 | b2acfa249ccc0ece492b282d3d144ead9c33f01d | refs/heads/master | 2021-07-06T22:03:26.114935 | 2020-08-30T13:36:12 | 2020-08-30T13:36:12 | 236,012,442 | 1 | 0 | null | false | 2021-04-20T19:14:07 | 2020-01-24T13:58:12 | 2020-08-30T13:36:21 | 2021-04-20T19:14:06 | 1,649 | 0 | 0 | 1 | Python | false | false | from log_parser.renderers.error_renderer import ErrorRenderer
from log_parser.renderers.stats_output_renderer import StatsOutputRenderer
__all__ = [
"ErrorRenderer",
"StatsOutputRenderer"
]
| UTF-8 | Python | false | false | 200 | py | 40 | __init__.py | 36 | 0.775 | 0.775 | 0 | 8 | 24 | 74 |
Nisarg-S/coop-fall-2020-challenge | 5,205,500,399,607 | 2128f66bb678468d7efe57c0790327840005e8ba | e7801b1355615d7c11dd7a200db39532e6558c96 | /solution_python.py | 2c20c9874708dc88d23042b61899caa2e17d18e7 | [] | no_license | https://github.com/Nisarg-S/coop-fall-2020-challenge | 88cc20d7ec39bfb5eea98a1d50b01bc2556a1c99 | f3e86b3e977d6455fc78e0920e86a440f3a3fa63 | refs/heads/master | 2022-11-14T00:20:34.085412 | 2020-07-10T18:04:44 | 2020-07-10T18:04:44 | 278,688,492 | 0 | 0 | null | true | 2020-07-10T17:00:19 | 2020-07-10T17:00:18 | 2020-07-10T17:00:04 | 2020-07-10T14:48:49 | 87 | 0 | 0 | 0 | null | false | false | class EventSourcer():
# Do not change the signature of any functions
def __init__(self):
self.value = 0
# holds value of num at a moment in time i.e action1, action2 etc..
self.history = []
# holds the differential values (i.e current value - previous value) for the redo queue (last index is latest undo)
self.redo_queue = []
def add(self, num: int):
self.history.append(self.value)
self.value += num
# if there is a redo chain, once a new value is set, the redo step for the next action is voided
if len(self.redo_queue):
self.redo_queue.pop(-1)
def subtract(self, num: int):
self.history.append(self.value)
self.value -= num
# if there is a redo chain, once a new value is set, the redo step for the next action is voided
if len(self.redo_queue):
self.redo_queue.pop(-1)
def undo(self):
# if there is history to be undone
if len(self.history):
# set the differential value from the undo
self.redo_queue.append(self.value - self.history[-1])
# set current value as previous value
self.value = self.history[-1]
# remove last action from history
self.history.pop()
def redo(self):
# if there actions to be redone
if len(self.redo_queue):
# redo the differential change done by the undo
self.value += self.redo_queue[-1]
# add current state to history
self.history.append(self.value)
# remove last differential from redo chain
self.redo_queue.pop(-1)
def bulk_undo(self, steps: int):
for i in range(steps):
# if there is nothing left to undo, break
if not len(self.history):
break
self.undo()
def bulk_redo(self, steps: int):
for i in range(steps):
# if there is nothing left to redo, break
if not len(self.redo_queue):
break
self.redo()
| UTF-8 | Python | false | false | 2,100 | py | 1 | solution_python.py | 1 | 0.57381 | 0.569524 | 0 | 57 | 35.842105 | 123 |
0xb01u/AoC2019 | 18,433,999,657,471 | 39541fcc8ceede25b8763f3496b410ca0f6ab017 | 8dd651be2fb0cafe923597c05a20d7abb75c3326 | /Day_13/one.py | 6f105c6c608c4a99dbc3d1974c5699b5da8f7897 | [] | no_license | https://github.com/0xb01u/AoC2019 | 0afaf556134f3c2eb8b637c74d3287359ceab17d | 16ca3587365a87d701f0d36ffc7b845d76cfbbcc | refs/heads/master | 2022-03-14T18:53:41.614234 | 2019-12-23T17:37:50 | 2019-12-23T17:37:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Advent of Code 2019 - Day 13
# Care package
import sys
sys.path.append("../python_modules/custom")
from intcomputer import Intcomputer
arcade = Intcomputer(list(map(int, open("game.txt", "r").read().split(","))))
tiles = { 0: 0, 1: 0, 2: 0, 3: 0, 4: 0}
arcade.run()
for e in arcade.output()[2::3]:
tiles[e] += 1
print("Number of block tiles:", tiles[2])
| UTF-8 | Python | false | false | 386 | py | 44 | one.py | 39 | 0.629534 | 0.57513 | 0 | 17 | 21.705882 | 77 |
fpoli/view-spark-timeline | 17,884,243,826,241 | 9157cfb59e0b6d6a90f6aa8fbd8bf838b430a594 | be431a3e9d7aacd5815ee646070beb130aed1bca | /setup.py | 7778ae64983373070ced97a81a5e60784691063f | [
"MIT"
] | permissive | https://github.com/fpoli/view-spark-timeline | 056afd00ff06dda2b57f4131c07d2abc26af386d | 3c5d445404f97d1f805a93425c3b798011fcdb6f | refs/heads/master | 2022-07-14T11:53:22.652001 | 2020-03-20T13:58:18 | 2020-03-20T13:58:18 | 111,108,894 | 5 | 0 | MIT | false | 2022-07-05T21:06:33 | 2017-11-17T13:52:52 | 2020-11-09T11:47:41 | 2022-07-05T21:06:33 | 2,188 | 4 | 0 | 3 | Python | false | false | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
description = "Command line application to visualize the timeline of Spark executions."
main_ns = {}
with open("viewsparktimeline/version.py") as ver_file:
exec(ver_file.read(), main_ns)
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_descr = f.read()
setup(
name="view-spark-timeline",
version = main_ns['__version__'],
description = description,
long_description = long_descr,
license = "MIT",
url = "https://github.com/fpoli/view-spark-timeline",
author = "Federico Poli",
author_email = "federpoli@gmail.com",
packages = find_packages(exclude=["tests"]),
entry_points = {
"console_scripts": [
"view-spark-timeline = viewsparktimeline.cli:main"
]
},
install_requires = [
"svgwrite==1.1.11",
"ujson==1.35"
],
extras_require = {
"dev": [
"twine==1.9.1",
"nose==1.3.7",
"pycodestyle==2.3.1"
]
},
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5"
]
)
| UTF-8 | Python | false | false | 1,399 | py | 11 | setup.py | 6 | 0.578985 | 0.563259 | 0 | 57 | 23.54386 | 87 |
bopopescu/Machine_Learning_Collections | 2,585,570,339,806 | c666a796290d1195d4d1a1018a0247a2a62397b5 | 87086df54494cdd42a686413639892ebf4978a3e | /Kaggle_Challenges/genentech_2016 Cervical Cancer Screening/genentech-py-lucas/base_util.py | e878410c622235ecd6250835286d635dbbc5de36 | [] | no_license | https://github.com/bopopescu/Machine_Learning_Collections | acbe70d55a2551f98d7a86132608d432c957c369 | e77d81754e77c7acdfec86ec395892400657482e | refs/heads/master | 2022-04-11T01:39:09.848778 | 2019-09-15T15:23:31 | 2019-09-15T15:23:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
import logging
import sys
import pickle
import gzip
import os
import psycopg2
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from multiprocessing import cpu_count
import sklearn.metrics as metrics
import collections
import glob
NA_VAL = -100000
ID_COL = 'patient_id'
TARGET_COL = 'is_screener'
ROUND_PRED = 3
pd.options.display.float_format = '{:.6f}'.format
pd.set_option('max_columns', 100)
pd.set_option('max_rows', 100)
__GLOBAL_VARS = {}
__INPUT_DIR = '../data/input/'
__OUTPUT_DIR = '../data/output-py/'
__OUTPUT_RGF_DIR = '../data/output-rgf/'
__TEAM_DIR = '../data/team/'
__SUBMISSION_DIR = '../data/submission/'
__LOG_DIR = '../data/log/'
__LOG_FORMAT = "[%(asctime)s %(name)s %(levelname)-s] %(message)s"
__LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
__LOG_CONFIGURED = False
class StreamToLogger(object):
def __init__(self, logger, log_level, stream=None):
self.logger = logger
self.log_level = log_level
self.stream = stream
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
if self.stream is not None:
self.stream.write(buf)
self.stream.flush()
def flush(self):
if self.stream is not None:
self.stream.flush()
def close(self):
pass
def __get_process_log_name():
return 'LOG_HANDLER:' + str(os.getpid())
def __add_proc_log_to_global_vars(proc_log, replace_sys_streams):
proc_log_name = __get_process_log_name()
__GLOBAL_VARS[proc_log_name] = proc_log
__GLOBAL_VARS[proc_log_name + ':sys.stdout'] = sys.stdout
__GLOBAL_VARS[proc_log_name + ':sys.stderr'] = sys.stderr
if replace_sys_streams:
sys.stdout = StreamToLogger(proc_log, logging.INFO)
sys.stderr = StreamToLogger(proc_log, logging.ERROR)
def remove_proc_log():
proc_log_name = __get_process_log_name()
if proc_log_name in __GLOBAL_VARS:
del __GLOBAL_VARS[proc_log_name]
sys.stdout = __GLOBAL_VARS[proc_log_name + ':sys.stdout']
del __GLOBAL_VARS[proc_log_name + ':sys.stdout']
sys.stderr = __GLOBAL_VARS[proc_log_name + ':sys.stderr']
del __GLOBAL_VARS[proc_log_name + ':sys.stderr']
__add_proc_log_to_global_vars(proc_log=_ROOT_LOGGER, replace_sys_streams=False)
def get_log():
if __get_process_log_name() in __GLOBAL_VARS:
log = __GLOBAL_VARS[__get_process_log_name()]
else:
log = _ROOT_LOGGER
assert isinstance(log, logging.Logger)
return log
def config_file_log(fname, mode='w'):
fullname = __LOG_DIR + fname
if not fullname.endswith('.log'):
fullname += '.log'
remove_proc_log()
fdir = os.path.dirname(fullname)
if not os.path.exists(fdir):
os.makedirs(fdir)
proc_log = logging.getLogger(fname)
proc_log.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(fullname, mode=mode)
file_handler.setFormatter(fmt=logging.Formatter(fmt=__LOG_FORMAT, datefmt=__LOG_DATE_FORMAT))
file_handler.setLevel(logging.INFO)
proc_log.addHandler(file_handler)
__add_proc_log_to_global_vars(proc_log=proc_log,
replace_sys_streams=True)
if not __LOG_CONFIGURED:
__LOG_CONFIGURED = True
logging.basicConfig(
level=logging.DEBUG,
format=__LOG_FORMAT,
datefmt=__LOG_DATE_FORMAT, stream=sys.stdout)
_ROOT_LOGGER = logging.getLogger()
__add_proc_log_to_global_vars(proc_log=_ROOT_LOGGER, replace_sys_streams=False)
def reload_module(module):
import importlib
importlib.reload(module)
def tic():
__GLOBAL_VARS['.tic.timer'] = datetime.now()
def toc():
if '.tic.timer' in __GLOBAL_VARS:
get_log().info('Elapsed time: %s', str(datetime.now() - __GLOBAL_VARS['.tic.timer']))
def get_input_path(fname):
return __INPUT_DIR + fname
def get_output_path(fname):
return __OUTPUT_DIR + fname
def get_team_path(fname):
return __TEAM_DIR + fname
def get_output_rgf_path(fname):
return __OUTPUT_RGF_DIR + fname
def save_data(**kwargs):
for name in kwargs:
if isinstance(kwargs[name], pd.DataFrame) or isinstance(kwargs[name], pd.Series):
with pd.HDFStore(get_output_path(name + '.h5'), mode='w', complevel=9, complib='blosc') as store:
store[name] = kwargs[name]
else:
with gzip.open(get_output_path(name + '.pklz'), 'wb') as out_stream:
pickle.dump(kwargs[name], out_stream)
def copy_saved_data(**kwargs):
for name in kwargs:
value = load_data(kwargs[name])
save_data(**{name: value})
def load_data(name):
h5_path = get_output_path(name + '.h5')
if os.path.exists(h5_path):
with pd.HDFStore(h5_path, mode='r') as store:
return store[name]
else:
with gzip.open(get_output_path(name + '.pklz'), 'rb') as out_stream:
return pickle.load(out_stream)
def load_df_suffix(name, suffix):
df = load_data(name + suffix)
df.columns += suffix
return df
# noinspection PyUnresolvedReferences
def get_kfold_ids(k_list=None):
data = load_data('data_all_out')
kfold = np.sort(data['cv_index'].unique())
yield_k = lambda x: k_list is None or x in k_list
# noinspection PyTypeChecker
for k in kfold[kfold > 0]:
if yield_k(k):
# noinspection PyTypeChecker
# tr_ix = data.index[
# np.logical_and(
# np.logical_and(data['cv_index'] != k, data['cv_index'] > 0),
# ~data['exclude'])
# ]
tr_ix = data.index[
np.logical_and(data['cv_index'] != k, data['cv_index'] > 0)
]
val_ix = data.index[np.logical_and(data['cv_index'] == k, ~data['exclude'])]
yield k, tr_ix, val_ix
if yield_k(0):
# tr_ix = data.index[np.logical_and(data['cv_index'] > 0, ~data['exclude'])]
tr_ix = data.index[data['cv_index'] > 0]
yield 0, tr_ix, data.index[data['cv_index'] == 0]
def auc(actual, pred):
return metrics.roc_auc_score(y_true=actual, y_score=pred)
def add_inc_pred(data_pred_df, pred, n, pred_col='Pred'):
if n == 0:
data_pred_df[pred_col] = 0
data_pred_df[pred_col] *= n
data_pred_df[pred_col] += pred
data_pred_df[pred_col] /= n + 1
# return data_pred_df
def load_if_str(data):
if isinstance(data, str):
data = load_data(name=data)
return data
def get_prediction_summary(data_pred_df, pred_cols=None, do_print=True, transpose=True, percentiles=None):
data_pred_df = load_if_str(data_pred_df)
if pred_cols is None:
pred_cols = data_pred_df.columns
if percentiles is None:
percentiles = []
pred_summary = data_pred_df.describe(percentiles=percentiles)
data_all_out = load_data('data_all_out')
data_all_out = data_all_out[pd.notnull(data_all_out[TARGET_COL])]
data_pred_df_actual = pd.merge(left=data_all_out, right=data_pred_df, left_index=True, right_index=True)
if len(data_pred_df_actual) > 0:
score_ix = len(pred_summary)
for pred_col in pred_cols:
try:
pred_sel = pd.notnull(data_pred_df_actual[pred_col])
score = auc(actual=data_pred_df_actual.ix[pred_sel, TARGET_COL],
pred=data_pred_df_actual.ix[pred_sel, pred_col].round(decimals=ROUND_PRED))
except ValueError:
score = np.nan
pred_summary.loc[score_ix, pred_col] = score
pred_summary.index = list(pred_summary.index[:-1]) + ['auc']
if transpose:
pred_summary = pred_summary.transpose()
if do_print:
get_log().info('\nPrediction summary:\n%s' % pred_summary.to_string())
else:
return pred_summary
def unlist_dataframe(df_list):
if isinstance(df_list, pd.DataFrame):
df_all = df_list.copy()
else:
df_all = None
for df in df_list:
if df_all is None:
df_all = df.copy()
else:
df_all = df_all.append(df)
df_all.sort_index(inplace=True)
return df_all
def write_submission(name, data_pred_df=None, pred_col='Pred', suffix=''):
if data_pred_df is None:
data_pred_df = load_data(name)
if not os.path.exists(__SUBMISSION_DIR):
os.makedirs(__SUBMISSION_DIR)
if pred_col not in data_pred_df.columns:
pred_col = get_default_input_col(data_pred_df)[0]
data_all_out = load_data('data_all_out')
data_all_out = data_all_out[pd.isnull(data_all_out[TARGET_COL])]
data_sub_df = data_pred_df.ix[data_all_out.index][[pred_col]]
pred_col = 'predict_screener'
data_sub_df.columns = [pred_col]
data_sub_df[pred_col] = data_sub_df[pred_col].round(decimals=ROUND_PRED)
with gzip.open(__SUBMISSION_DIR + name + suffix + '.csv.gz', 'wt') as fout:
data_sub_df.to_csv(path_or_buf=fout)
# return data_sub_df
def write_team_csv(name, data_pred_df=None):
if data_pred_df is None:
data_pred_df = load_data(name)
data_pred_df = load_if_str(data_pred_df)
if not os.path.exists(__TEAM_DIR):
os.makedirs(__TEAM_DIR)
# with gzip.open(__TEAM_DIR + name + '.csv.gz', 'wt') as fout:
# data_pred_df.to_csv(path_or_buf=fout, index=True, float_format='%1.3f')
data_pred_df.to_csv(path_or_buf=__TEAM_DIR + name + '.csv.bz2', index=True, float_format='%1.3f', compression='bz2')
# return data_sub_df
def get_as_list(input_col):
if isinstance(input_col, str) or not isinstance(input_col, list):
input_col = [input_col]
return input_col
def get_last_value_series(x):
return x.fillna(method='ffill')
# r = x.copy()
# last_val = np.nan
# for iloc, val in enumerate(x):
# r.iloc[iloc] = last_val
# if not np.isnan(val):
# last_val = val
# return r
def get_next_value_series(x):
return x.fillna(method='bfill')
# return get_last_value_series(x.iloc[::-1]).iloc[::-1]
def get_aggr(data, group_col, input_col, fun='size'):
group_col = get_as_list(group_col)
input_col = get_as_list(input_col)
data_group = data[group_col + input_col].groupby(group_col)
data_counts = data_group.agg(fun)
if len(group_col) == 1:
agg_index = pd.Index(data.ix[:, group_col].values.reshape(-1), dtype='object')
else:
agg_index = pd.Index(data.ix[:, group_col].values, dtype='object')
data_counts = data_counts.ix[agg_index]
data_counts.fillna(0)
return data_counts
def get_ordinal_recode(data, input_col=None, enc_all=False):
if input_col is None:
input_col = get_default_input_col(data)
for col_nam in input_col:
# print(col_nam)
if data[col_nam].dtype == object or enc_all:
data[col_nam] = preprocessing.LabelEncoder().fit_transform(data[col_nam].fillna('na_val').values)
def get_random_ordinal_recode(data, input_col=None, enc_all=False, random_state=8393478):
if input_col is None:
input_col = get_default_input_col(data)
if isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
for col_nam in input_col:
if data[col_nam].dtype == object or enc_all:
unique_val = np.unique(data[col_nam].values)
recode_df = pd.DataFrame({'Val': random_state.permutation(len(unique_val))}, index=unique_val)
data[col_nam] = recode_df.ix[data[col_nam].values, 'Val'].values
def shuff_data(data, input_col=None, random_state=None):
if input_col is None:
input_col = get_default_input_col(data)
if random_state is not None:
np.random.seed(random_state)
for col_nam in input_col:
data_col_unique = data[col_nam].unique()
data_col_shuff = np.copy(data_col_unique)
np.random.shuffle(data_col_shuff)
data_map = pd.DataFrame({'NewVal': data_col_shuff}, index=data_col_unique)
data[col_nam] = data_map.ix[data[col_nam], 'NewVal'].values
return data
def get_xy_data_split(data, ids, input_col=None, y_transf=None):
if input_col is None:
input_col = get_default_input_col(data)
elif isinstance(input_col, str):
input_col = load_data(input_col)
data_x = data.ix[ids, input_col].values.astype(float)
data_y = data.ix[ids, TARGET_COL].values.astype(float)
if y_transf is not None:
data_y = y_transf(data_y)
return data_x, data_y
def get_default_input_col(data):
col_blacklist = []
return [nam for nam in data.columns if nam not in col_blacklist + [TARGET_COL]]
def get_xgb_data_split(**kwargs):
data_x, data_y = get_xy_data_split(**kwargs)
return get_xgb_data_matrix(data_x=data_x, data_y=data_y)
def get_xgb_data_matrix(data_x, data_y, missing=NA_VAL):
# if isinstance(data_x, pd.DataFrame):
# data_x.columns = replace_nonalpha_lst(data_x.columns)
if data_y is None or np.any(pd.isnull(data_y)):
xg_data = xgb.DMatrix(data_x, missing=missing)
else:
xg_data = xgb.DMatrix(data_x, label=data_y, missing=missing)
return xg_data
def get_xgb_eval_transf(transf):
return lambda pred, data_xg: ('auc', auc(actual=transf.pred(data_xg.get_label()), pred=transf.pred(pred)))
def get_xgb_eval(pred, data_xg):
return 'auc', auc(actual=data_xg.get_label(), pred=pred)
def get_xgb_score(model, input_col=None, do_print=True):
model_score = model.get_fscore()
model_score_fnam = [None] * len(model_score)
model_score_fval = [None] * len(model_score)
for ix_score, score_item in enumerate(model_score.items()):
score_fnam, score_val = score_item
if input_col is None or isinstance(score_fnam, str):
model_score_fnam[ix_score] = score_fnam
else:
ix_col = int(score_fnam[1:])
model_score_fnam[ix_score] = input_col[ix_col]
model_score_fval[ix_score] = score_val
model_score_fval = np.divide(model_score_fval, sum(model_score_fval))
model_score_df = pd.DataFrame({'Feature': model_score_fnam, 'Score': model_score_fval})
model_score_df.set_index('Feature', inplace=True)
model_score_df.sort_values(by=['Score'], ascending=False, inplace=True)
model_score_df['Order'] = range(1, len(model_score_fval) + 1)
if do_print:
get_log().info('\nModel features score:\n%s' % model_score_df.to_string())
# return model_score
def get_lr_score(model, input_col, do_print=True):
model_score_df = pd.DataFrame({'Coef': model.coef_}, index=pd.Index(input_col, name='Col'))
model_score_df['Score'] = np.abs(model_score_df['Coef'])
model_score_df['Score'] /= model_score_df['Score'].sum()
model_score_df.sort_values(by=['Score'], ascending=False, inplace=True)
model_score_df['Order'] = range(1, model_score_df.shape[0] + 1)
if do_print:
get_log().info('\nModel features score:\n%s' % model_score_df.to_string())
# return model_score_df
def get_rf_score(model, input_col, do_print=True):
model_score_df = pd.DataFrame({'Score': model.feature_importances_}, index=pd.Index(input_col, name='Col'))
# pd.set_option('max_columns', max([model_score_df.shape[1], 100]))
# pd.set_option('max_rows', max([model_score_df.shape[0], 100]))
model_score_df.sort_values(by=['Score'], ascending=False, inplace=True)
model_score_df['Order'] = range(1, model_score_df.shape[0] + 1)
if do_print:
get_log().info('\nModel features score:\n%s' % model_score_df.to_string())
return model_score_df
def get_et_score(**kwargs):
return get_rf_score(**kwargs)
def get_data_pred_df(ids):
return pd.DataFrame(data={'Pred': 0}, index=pd.Index(ids, name='Id'))
def get_pred_df(data_pred_names, scale=False, preffix='pred_'):
data_pred_df = load_data('data_all_out')
for nam in data_pred_names:
data_pred_cur = load_data(preffix + nam)
if scale:
data_pred_cur['Pred'] = preprocessing.StandardScaler().fit_transform(data_pred_cur['Pred'].values)
new_nam = nam.replace(preffix, '')
data_pred_df.ix[data_pred_cur.index, new_nam] = data_pred_cur['Pred']
return data_pred_df
def reescale(data, input_col=None, ignore_zeros=False):
if isinstance(data, str):
data = load_data(name=data)
if input_col is None:
input_col = get_default_input_col(data)
for nam in input_col:
if ignore_zeros:
not_zero = data[nam] != 0
data.ix[not_zero, nam] = preprocessing.StandardScaler().fit_transform(
data.ix[not_zero, [nam]].values.astype(float))[:, 0]
else:
data[nam] = preprocessing.StandardScaler().fit_transform(data[[nam]].values.astype(float))[:, 0]
# return data
def get_unique_count(data, cols=None):
if isinstance(data, str):
data = load_data(data)
if cols is None:
cols = data.columns
return [(col_nam, data[col_nam].nunique()) for col_nam in cols]
def get_core_count():
count = cpu_count()
if 1 < count <= 12:
count = int(count / 2)
return count
def add_cols(data, data_add, cols_add=None, cols_add_suffix=''):
data = load_if_str(data)
data_add = load_if_str(data_add)
if cols_add is None:
cols_add = data_add.columns
for col in cols_add:
data.ix[data_add.index, col + cols_add_suffix] = data_add[col]
return data
def identity(x):
return x
def get_identity_transform():
return TransformY(y=identity, pred=identity)
def get_log_transform():
return TransformY(
y=lambda x: np.sign(x) * np.log1p(np.abs(x)),
pred=lambda x: np.sign(x) * np.expm1(np.abs(x))
)
# noinspection PyTypeChecker
def get_power_transform(power):
return TransformY(
y=lambda x: np.sign(x) * np.power(np.abs(x), power),
pred=lambda x: np.sign(x) * np.power(np.abs(x), 1. / power)
)
TransformY = collections.namedtuple('TransformY', ['y', 'pred'])
def get_iloc(data, *kargs):
ret_val = [None] * len(kargs)
for ix, ids in enumerate(kargs):
ret_val[ix] = np.array([data.index.get_loc(oid) for oid in ids], int)
return tuple(ret_val)
def call_cv_train_sequential(train_func, args_iterator=None):
if args_iterator is None:
args_iterator = get_kfold_ids()
return unlist_dataframe([train_func(*args) for args in args_iterator])
def call_cv_train_parallel(train_func, args_iterator=None):
if args_iterator is None:
args_iterator = get_kfold_ids()
from multiprocessing import Pool
pool = Pool(get_core_count())
retval = unlist_dataframe(pool.starmap(train_func, args_iterator))
pool.terminate()
return retval
def call_func_parallel(func, args_iterator, workers=-1):
from multiprocessing import Pool
if workers == -1:
workers = get_core_count()
pool = Pool(workers)
pool.starmap(func, args_iterator)
pool.terminate()
def save_rgf_cfg(cfg_params, file):
with open(file, 'w') as fout:
for pnam, pval in cfg_params.items():
if pval is None:
fout.write('%s\n' % pnam)
else:
fout.write('%s=%s\n' % (pnam, str(pval)))
def print_stages(actual, stage_predictions):
count = 0
iters = []
loss = []
count_factor = 50
for prediction in stage_predictions:
count += 1
if count in [1, 10, 50] or count % count_factor == 0:
iters.append(count)
loss.append(auc(actual=actual, pred=prediction))
if count > 1000:
count_factor = 500
elif count > 500:
count_factor = 200
elif count > 250:
count_factor = 100
loss_df = pd.DataFrame({'Iteration': iters, 'Loss': loss})
loss_df.rename(columns={'Loss': 'auc'}, inplace=True)
get_log().info('\nLoss:\n%s' % loss_df.to_string())
return loss_df
def replace_nonalpha_lst(str_list):
import re
str_repl = list(str_list)
for ix, val in enumerate(str_repl):
str_repl[ix] = re.sub(r'[\W_]+', '', val)
return str_repl
def get_lastet_file(search_path):
return max(glob.iglob(search_path), key=os.path.getmtime)
def to_multi_index(values):
if values.shape[1] > 1:
return pd.Index(values, dtype='object')
else:
return pd.Index(values[:, 0])
def __db_connect():
creds = pd.read_csv(get_input_path('credentials.csv'))
cs = str(creds.loc[creds.field=='REDSHIFT', 'value'].values)
return psycopg2.connect(cs)
def exec_cmd_db(cmd):
with __db_connect() as conn:
with conn.cursor() as cur:
cur.execute(cmd)
conn.commit()
def load_from_db(query):
with __db_connect() as con:
return pd.read_sql_query(sql=query, con=con)
def copy_all_fields(to_data, from_data):
from_data = load_if_str(from_data)
for col_nam in from_data.columns:
to_data[col_nam] = from_data[col_nam]
def copy_all_fields_cv(to_data, from_data, k):
from_data = drop_other_cv_cols(data=from_data, k=k)
copy_all_fields(to_data=to_data, from_data=from_data)
def drop_other_cv_cols(data, k):
data = load_if_str(data)
cols_drop = [col_nam for col_nam in data.columns if "_cv_ix_" in col_nam and ("_cv_ix_" + str(k)) not in col_nam]
data.drop(cols_drop, axis='columns', inplace=True)
return data
def calc_likelihood_db(table, fields, prev_max_vals=0, aggr='max', min_count=1, where1="", where2=""):
data_all_out = load_data('data_all_out')
global_avg = data_all_out['is_screener'].mean().round(3)
fields = get_as_list(fields)
fields_t4 = ", ".join(['t4.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_t3 = ", ".join(['t3.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_declare = ", ".join([f + " " + 'g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_join = " and ".join(['l1.g' + str(ix + 1) + " = " + 'l2.g' + str(ix + 1) for ix, f in enumerate(fields)])
prev_vals_select = ""
prev_vals_nth = ""
if prev_max_vals > 0:
prev_vals_select = "".join([
" {aggr}(cv1_avg_{prev}_nth) as cv1_avg_{prev}_nth, "
" {aggr}(cv2_avg_{prev}_nth) as cv2_avg_{prev}_nth, "
" {aggr}(cv3_avg_{prev}_nth) as cv3_avg_{prev}_nth, "
" {aggr}(cv12_avg_{prev}_nth) as cv12_avg_{prev}_nth, "
" {aggr}(cv13_avg_{prev}_nth) as cv13_avg_{prev}_nth, "
" {aggr}(cv23_avg_{prev}_nth) as cv23_avg_{prev}_nth, "
" {aggr}(cv123_avg_{prev}_nth) as cv123_avg_{prev}_nth, ".format(
prev=nth + 1, aggr=aggr
)
for nth in range(prev_max_vals)])
prev_sort = ''
if aggr == 'max':
prev_sort = 'desc'
if aggr == 'min':
prev_sort = 'asc'
prev_vals_nth = "".join([
" nth_value(l1.cv1_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv1_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv1_avg_{prev}_nth, "
" nth_value(l1.cv2_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv2_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv2_avg_{prev}_nth, "
" nth_value(l1.cv3_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv3_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv3_avg_{prev}_nth, "
" nth_value(l1.cv12_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv12_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv12_avg_{prev}_nth, "
" nth_value(l1.cv13_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv13_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv13_avg_{prev}_nth, "
" nth_value(l1.cv23_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv23_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv23_avg_{prev}_nth, "
" nth_value(l1.cv123_avg, {prev_val}) ignore nulls over("
" partition by patient_id order by cv123_avg {prev_sort}"
" rows between unbounded preceding and unbounded following) as cv123_avg_{prev}_nth, ".format(
prev_val=nth + 2, prev=nth + 1, prev_sort=prev_sort
)
for nth in range(prev_max_vals)])
min_count_where = "" if min_count <= 1 else (" where (t4.cv1_cnt + t4.cv2_cnt + t4.cv3_cnt) >= %d " % min_count)
sql = "select " \
" patient_id, " \
" {prev_max_vals_select} " \
" {aggr}(cv1_avg) as cv1_avg, " \
" {aggr}(cv2_avg) as cv2_avg, " \
" {aggr}(cv3_avg) as cv3_avg, " \
" {aggr}(cv12_avg) as cv12_avg, " \
" {aggr}(cv13_avg) as cv13_avg, " \
" {aggr}(cv23_avg) as cv23_avg, " \
" {aggr}(cv123_avg) as cv123_avg " \
"from " \
"(select " \
" {prev_max_vals_nth} " \
"* " \
"from " \
" (select " \
" {fields_t4}, " \
" (t4.cv1_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.g_smooth) as cv1_avg, " \
" (t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.g_smooth) as cv2_avg, " \
" (t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv3_cnt + t4.g_smooth) as cv3_avg, " \
" (t4.cv1_pos + t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv2_cnt + t4.g_smooth) as cv12_avg, " \
" (t4.cv1_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv3_cnt + t4.g_smooth) as cv13_avg, " \
" (t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) as cv23_avg, " \
" (t4.cv1_pos + t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg) / " \
" (t4.cv1_cnt + t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) as cv123_avg " \
" from ( " \
" select " \
" {fields_t3}, " \
" sum(cast(t3.cv_index = 1 as integer)) cv1_cnt, " \
" sum(cast(t3.cv_index = 1 as integer)*cast(is_screener as float)) as cv1_pos, " \
" sum(cast(t3.cv_index = 2 as integer)) cv2_cnt, " \
" sum(cast(t3.cv_index = 2 as integer)*cast(is_screener as float)) as cv2_pos, " \
" sum(cast(t3.cv_index = 3 as integer)) cv3_cnt, " \
" sum(cast(t3.cv_index = 3 as integer)*cast(is_screener as float)) as cv3_pos, " \
" {global_avg} as g_avg, " \
" 30 as g_smooth " \
" from " \
" ((select patient_id, {fields_declare} from {table} {where1}) t1 " \
" inner join " \
" (select patient_id, is_screener, cv_index from train_cv_indices where not is_screener is null) t2 " \
" on t1.patient_id = t2.patient_id) t3 " \
" group by {fields_t3}) t4 {min_count_where}) l1 " \
" inner join (select distinct patient_id, {fields_declare} from {table} {where2}) l2 on {fields_join} ) " \
"group by patient_id ".format(table=table, fields_t4=fields_t4, fields_t3=fields_t3,
fields_declare=fields_declare, fields_join=fields_join,
prev_max_vals_select=prev_vals_select,
prev_max_vals_nth=prev_vals_nth,
global_avg=str(global_avg),
aggr=aggr, min_count_where=min_count_where,
where1=where1, where2=where2)
# get_log().info('\n\n' + sql + '\n\n')
data_likelihood = load_from_db(sql)
data_likelihood.set_index('patient_id', inplace=True)
data_likelihood = data_likelihood.ix[data_all_out.index, :].copy()
data_likelihood.fillna(0.0, inplace=True)
# save_data(data_likelihood_last=data_likelihood)
cv_ix_all = np.sort(data_all_out['cv_index'].unique())
cv_ix_tr = cv_ix_all[cv_ix_all != 0]
data_likelihood_ret = data_likelihood[[]].copy()
for nth in range(prev_max_vals + 1):
nth_suffix = '' if nth == 0 else '_%d_nth' % nth
for k in cv_ix_all:
col_nam = aggr + "_avg" + nth_suffix + "_" + "_".join(fields) + "_cv_ix_" + str(k)
data_likelihood_ret[col_nam] = np.nan
cv_ix_tr_cur = cv_ix_tr[cv_ix_tr != k]
for k_val in cv_ix_all:
data_likelihood_ret.ix[data_all_out['cv_index'] == k_val, col_nam] = \
data_likelihood['cv%s_avg' % ''.join(list(cv_ix_tr_cur[cv_ix_tr_cur != k_val].astype(str))) +
nth_suffix]
return data_likelihood_ret
def calc_max_val_value_db(table, fields, prev_max_vals=1):
data_all_out = load_data('data_all_out')
global_avg = data_all_out['is_screener'].mean().round(3)
fields = get_as_list(fields)
fields_t4 = ", ".join(['t4.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_t3 = ", ".join(['t3.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_declare = ", ".join([f + " " + 'g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_join = " and ".join(['l1.g' + str(ix + 1) + " = " + 'l2.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_vals = " || ".join(['l1.g' + str(ix + 1) for ix, f in enumerate(fields)])
prev_max_vals_select = "".join([
" max(cv1_avg_{prev}_nth) as cv1_avg_{prev}_nth, "
" max(cv2_avg_{prev}_nth) as cv2_avg_{prev}_nth, "
" max(cv3_avg_{prev}_nth) as cv3_avg_{prev}_nth, "
" max(cv12_avg_{prev}_nth) as cv12_avg_{prev}_nth, "
" max(cv13_avg_{prev}_nth) as cv13_avg_{prev}_nth, "
" max(cv23_avg_{prev}_nth) as cv23_avg_{prev}_nth, "
" max(cv123_avg_{prev}_nth) as cv123_avg_{prev}_nth, ".format(
prev=nth + 1
)
for nth in range(prev_max_vals)])
prev_max_vals_nth = "".join([
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv1_avg desc"
" rows between unbounded preceding and unbounded following) as cv1_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv2_avg desc"
" rows between unbounded preceding and unbounded following) as cv2_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv3_avg desc"
" rows between unbounded preceding and unbounded following) as cv3_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv12_avg desc"
" rows between unbounded preceding and unbounded following) as cv12_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv13_avg desc"
" rows between unbounded preceding and unbounded following) as cv13_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv23_avg desc"
" rows between unbounded preceding and unbounded following) as cv23_avg_{prev}_nth, "
" nth_value({fields_vals}, {prev}) ignore nulls over(partition by patient_id order by cv123_avg desc"
" rows between unbounded preceding and unbounded following) as cv123_avg_{prev}_nth, ".format(
prev=nth + 1, fields_vals=fields_vals
)
for nth in range(prev_max_vals)])
sql = "select " \
" {prev_max_vals_select} " \
" patient_id " \
"from " \
"(select " \
" {prev_max_vals_nth} " \
"* " \
"from " \
" (select " \
" {fields_t4}, " \
" (t4.cv1_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.g_smooth) as cv1_avg, " \
" (t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.g_smooth) as cv2_avg, " \
" (t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv3_cnt + t4.g_smooth) as cv3_avg, " \
" (t4.cv1_pos + t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv2_cnt + t4.g_smooth) as cv12_avg, " \
" (t4.cv1_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv3_cnt + t4.g_smooth) as cv13_avg, " \
" (t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) as cv23_avg, " \
" (t4.cv1_pos + t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg) / " \
" (t4.cv1_cnt + t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) as cv123_avg " \
" from ( " \
" select " \
" {fields_t3}, " \
" sum(cast(t3.cv_index = 1 as integer)) cv1_cnt, " \
" sum(cast(t3.cv_index = 1 as integer)*cast(is_screener as float)) as cv1_pos, " \
" sum(cast(t3.cv_index = 2 as integer)) cv2_cnt, " \
" sum(cast(t3.cv_index = 2 as integer)*cast(is_screener as float)) as cv2_pos, " \
" sum(cast(t3.cv_index = 3 as integer)) cv3_cnt, " \
" sum(cast(t3.cv_index = 3 as integer)*cast(is_screener as float)) as cv3_pos, " \
" {global_avg} as g_avg, " \
" 30 as g_smooth " \
" from " \
" ((select patient_id, {fields_declare} from {table}) t1 " \
" inner join " \
" (select patient_id, is_screener, cv_index from train_cv_indices where not is_screener is null) t2 " \
" on t1.patient_id = t2.patient_id) t3 " \
" group by {fields_t3}) t4) l1 " \
" inner join (select distinct patient_id, {fields_declare} from {table}) l2 on {fields_join} ) " \
"group by patient_id ".format(table=table, fields_t4=fields_t4, fields_t3=fields_t3,
fields_declare=fields_declare, fields_join=fields_join,
prev_max_vals_select=prev_max_vals_select,
prev_max_vals_nth=prev_max_vals_nth,
global_avg=str(global_avg))
# get_log().info('\n\n' + sql + '\n\n')
data_max_val = load_from_db(sql)
data_max_val.set_index('patient_id', inplace=True)
data_max_val = data_max_val.ix[data_all_out.index, :].copy()
data_max_val.fillna('NA_VAL', inplace=True)
# save_data(data_max_val_last=data_max_val)
cv_ix_all = np.sort(data_all_out['cv_index'].unique())
cv_ix_tr = cv_ix_all[cv_ix_all != 0]
data_max_val_ret = data_max_val[[]].copy()
for nth in range(prev_max_vals):
nth_suffix = '_%d_nth' % (nth + 1)
for k in cv_ix_all:
col_nam = "val" + nth_suffix + "_" + "_".join(fields) + "_cv_ix_" + str(k)
data_max_val_ret[col_nam] = np.nan
cv_ix_tr_cur = cv_ix_tr[cv_ix_tr != k]
for k_val in cv_ix_all:
data_max_val_ret.ix[data_all_out['cv_index'] == k_val, col_nam] = \
data_max_val['cv%s_avg' % ''.join(list(cv_ix_tr_cur[cv_ix_tr_cur != k_val].astype(str))) +
nth_suffix]
return data_max_val_ret
def calc_likelihood_count_db(table, fields, intervals, return_query=False):
# table = 'diagnosis_feats'
# fields = ['diagnosis_code']
# intervals = np.array([0.0] + list(np.linspace(0.0, 1.0, num=21)[2:]))
data_all_out = load_data('data_all_out')
global_avg = data_all_out['is_screener'].mean().round(3)
fields = get_as_list(fields)
fields_t4 = ", ".join(['t4.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_t3 = ", ".join(['t3.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_declare = ", ".join([f + " " + 'g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_join = " and ".join(['l1.g' + str(ix + 1) + " = " + 'l2.g' + str(ix + 1) for ix, f in enumerate(fields)])
fields_cnt = ""
for ix in range(1, len(intervals)):
fields_mask = ' sum(cast((cv{cv}_avg >{eq_low} {rng_low} and ' \
'cv{cv}_avg <= {rng_hi}) as integer)) as cv{cv}_bin_cnt_{ix}'. \
format(eq_low='' if ix > 1 else '=',
rng_low="{:1.2f}".format(intervals[ix - 1]),
rng_hi="{:1.2f}".format(intervals[ix]),
ix=ix, cv='{cv}')
if len(fields_cnt) > 0:
fields_cnt += ', \n'
fields_cnt += ", \n".join([fields_mask.format(cv=cv) for cv in ['1', '2', '3', '12', '13', '23', '123']])
# print(fields_cnt)
sql = "select \n" \
" patient_id, \n" \
"{fields_cnt} \n" \
"from \n" \
"(select \n" \
"* \n" \
"from \n" \
" (select \n" \
" {fields_t4}, \n" \
" (t4.cv1_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.g_smooth) as cv1_avg, \n" \
" (t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.g_smooth) as cv2_avg, \n" \
" (t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv3_cnt + t4.g_smooth) as cv3_avg, \n" \
" (t4.cv1_pos + t4.cv2_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv2_cnt + t4.g_smooth) \n" \
" as cv12_avg, \n" \
" (t4.cv1_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv1_cnt + t4.cv3_cnt + t4.g_smooth) \n" \
" as cv13_avg, \n" \
" (t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg)/(t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) \n" \
" as cv23_avg, \n" \
" (t4.cv1_pos + t4.cv2_pos + t4.cv3_pos + t4.g_smooth*t4.g_avg) / \n" \
" (t4.cv1_cnt + t4.cv2_cnt + t4.cv3_cnt + t4.g_smooth) as cv123_avg \n" \
" from ( \n" \
" select \n" \
" {fields_t3}, \n" \
" sum(cast(t3.cv_index = 1 as integer)) cv1_cnt, \n" \
" sum(cast(t3.cv_index = 1 as integer)*cast(is_screener as float)) as cv1_pos, \n" \
" sum(cast(t3.cv_index = 2 as integer)) cv2_cnt, \n" \
" sum(cast(t3.cv_index = 2 as integer)*cast(is_screener as float)) as cv2_pos, \n" \
" sum(cast(t3.cv_index = 3 as integer)) cv3_cnt, \n" \
" sum(cast(t3.cv_index = 3 as integer)*cast(is_screener as float)) as cv3_pos, \n" \
" {global_avg} as g_avg, \n" \
" 100 as g_smooth \n" \
" from \n" \
" ((select patient_id, {fields_declare} from {table}) t1 \n" \
" inner join \n" \
" (select patient_id, is_screener, cv_index from train_cv_indices where not is_screener is null) t2 \n" \
" on t1.patient_id = t2.patient_id) t3 \n" \
" group by {fields_t3}) t4) l1 \n" \
" inner join (select distinct patient_id, {fields_declare} from {table}) l2 on {fields_join} ) \n" \
"group by patient_id \n".format(table=table, fields_t4=fields_t4, fields_t3=fields_t3,
fields_declare=fields_declare, fields_join=fields_join,
global_avg=str(global_avg), fields_cnt=fields_cnt)
if return_query:
return sql
data_llh_cnt = load_from_db(sql)
data_llh_cnt.set_index('patient_id', inplace=True)
data_llh_cnt = data_llh_cnt.ix[data_all_out.index, :].copy()
data_llh_cnt.fillna(0.0, inplace=True)
# save_data(data_llh_cnt_last=data_llh_cnt)
cv_ix_all = np.sort(data_all_out['cv_index'].unique())
cv_ix_tr = cv_ix_all[cv_ix_all != 0]
data_llh_cnt_ret = data_llh_cnt[[]].copy()
for ix in range(1, len(intervals)):
for k in cv_ix_all:
col_nam = 'bin_cnt_{ix}_{fields}_cv_ix_{k}'.format(ix=ix, fields="_".join(fields), k=k)
data_llh_cnt_ret[col_nam] = np.nan
cv_ix_tr_cur = cv_ix_tr[cv_ix_tr != k]
for k_val in cv_ix_all:
cnt_col = 'cv{cv}_bin_cnt_{ix}'.format(
cv=''.join(list(cv_ix_tr_cur[cv_ix_tr_cur != k_val].astype(str))),
ix=ix)
data_llh_cnt_ret.ix[data_all_out['cv_index'] == k_val, col_nam] = \
data_llh_cnt[cnt_col]
return data_llh_cnt_ret
def round_df(data, decimals=3):
for col_nam in data.columns:
if data[col_nam].dtype == float:
data[col_nam] = data[col_nam].round(decimals=decimals)
| UTF-8 | Python | false | false | 40,821 | py | 284 | base_util.py | 195 | 0.571765 | 0.554445 | 0 | 1,047 | 37.988539 | 120 |
Ongakute/PT-2021-komunikator | 7,679,401,555,938 | 60bdf2cd0c6abdd3e5a03740bcc5197aebc1bed1 | be788f8dc5750b571d7db21ec2ebb3fa4c3dd5f7 | /Client/response.py | 22bacc4a9ee3722bb1d129c3cc2ee590327013c7 | [] | no_license | https://github.com/Ongakute/PT-2021-komunikator | 44c7bbd8ca492b9698ca011b1ac19f588d822712 | c0fc74f5afa8d6f0f0b61be5658db7c05b9c932e | refs/heads/main | 2023-08-07T11:09:42.138336 | 2021-09-26T11:31:24 | 2021-09-26T11:31:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import time
from threading import Thread, Lock
import gi
import gui_callbacks
import global_functions
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Pango, Gdk
from gi.repository.GdkPixbuf import Pixbuf
from split_msg import main_split
class Response:
def __init__(self):
self.lock = Lock()
self.accept = False
self.exists = False
self.window_lock = Lock()
def Make_Response(self, buffer):
print(buffer)
signal = ""
#buffer = buffer.decode("UTF-8").replace("'", '"')
#mydata = ast.literal_eval(dict_str)
tmp = json.loads(buffer)
signal = tmp["signal"]
#print(signal)
data = tmp["data"]
if signal == "ACK":
return True
elif signal == "RJT":
return False
else:
return False
def Make_Response_Thread(self, buffer, window):
#self.lock.acquire()
print(buffer)
signal = ""
print("101")
buffer = buffer.decode("UTF-8").replace("'", '"')
print("10")
#mydata = ast.literal_eval(dict_str)
tmp = json.loads(buffer)
print("112")
signal = tmp["signal"]
data = tmp["data"]
print(signal, " ", data)
if signal == "ACK":
with self.lock:
self.accept = True
if data["action"] == "login":
print("daje okejke")
#time.sleep(0.08)
elif data["action"] == "add_contact":
#print("DANE: ",data["user"])
global_functions.contact_user_list.append(str(data["user"]))
#print(type(data["active"]))
if data["active"] == 1:
#print(data["user"])
global_functions.active_user_list.append(data["user"])
#print("2")
#time.sleep(0.08)
window.chat_window.refresh_contact_list(str(data["user"]))
#print("3")
elif data["action"] == "del_contact":
global_functions.contact_user_list.remove(data["user"])
#print("aktywby ", data["active"])
if data["active"] == 1:
global_functions.active_user_list.remove(data["user"])
#window.chat_window.czat._remove_chat(data["user"])
#time.sleep(0.08)
window.chat_window.refresh_contact_list_out(data["user"])
else:
return
#print(data["data"])
#time.sleep(0.08)
window.alert_text = data["data"]
return
#window.Show_alert(data["data"])
#print(type(data))
#window.chat_window.Show_alert_window(data)
#print("okejka")
elif signal == "RJT":
#print("rejeeect")
with self.lock:
self.accept = False
if data["action"] == "pass_reset_exists":
self.exists = True
elif data["action"] == "login_exists":
self.exists = True
#time.sleep(0.08)
window.alert_text = data["data"]
#print("rejeeect")
return
#lista kontaktow uzytkownika
elif signal == "LCU":
print("55")
contact = data["contacts"].split(',')
#print("LCU: ", contact, " len: ", len(contact) )
print("66")
if contact[0] != '':
print("77")
global_functions.contact_user_list = contact
#lista aktywnych kontaktow
elif signal == "LAU":
#print(data)
with self.window_lock:
window.login_window.After_Login()
contact = data["active"].split(',')
print("88")
if global_functions.contact_user_list:
print("99")
global_functions.active_user_list = list(set(global_functions.contact_user_list).intersection(contact))
#window.login_window.After_Login()
if global_functions.active_user_list:
#time.sleep(0.08)
print(global_functions.active_user_list)
with self.window_lock:
window.chat_window.active_users()
return
#przybycie nowego uzytkownika
elif signal == "NUR":
contact = data["login"]
#print(repr(contact))
if contact in (global_functions.contact_user_list):
#print("1")
#time.sleep(0.1)
with self.window_lock:
window.chat_window.refresh_contact_list(contact)
#print("2")
return
#window.refresh_contact_list()
#window.add_contact(contact)
#przybycie nowego uzytkownika
elif signal == "NCL":
contact = data["login"]
#print(data)
if contact in (global_functions.contact_user_list):
#time.sleep(0.08)
with self.window_lock:
window.chat_window.refresh_contact_list_out(contact)
#odebranie wiadomosci
elif signal == "MSG":
mess = [data["date"] + "\n" + data["from"] + ":\n" + main_split(data["message"]), 1]
#print("from ", data["from"], " who ",window.chat_window.uzytkownik)
#global_functions.income_message_list += mess
#time.sleep(0.08)
with self.window_lock:
window.chat_window.refresh_chat(mess, data["from"])
#zaproszenie do znajomych
elif signal == "CIN":
#wyświetlanie okna dialogowego
pass
elif signal == "CAP":
contact = data["user"]
print(repr(contact))
global_functions.contact_user_list += contact
global_functions.active_user_list.append(contact)
#time.sleep(0.08)
window.chat_window.refresh_contact_list(contact)
else:
print("oj ne ne ")
#self.lock.release()
return
| UTF-8 | Python | false | false | 6,470 | py | 22 | response.py | 15 | 0.479827 | 0.470706 | 0 | 199 | 31.472362 | 119 |
montionugera/IntroductionToDataScience | 5,815,385,751,683 | 884c969e84e8bde4742f40000871d49d646fc88a | 88f330d068b16b964c31256088bbf35827ee5851 | /DW-DataExtraction/L4/find_cities.py | 00a06f5035a19f1ef5d528f13b7474784025051a | [] | no_license | https://github.com/montionugera/IntroductionToDataScience | b4408a4ec584b2debaaeceaeae9313bad26c7272 | cba60c4e6ba1bc56f0100c4c872f9c2668b8cfc0 | refs/heads/master | 2021-01-18T22:58:13.648089 | 2016-04-20T16:47:12 | 2016-04-20T16:47:12 | 41,616,909 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
Your task is to write a query that will return all cities
that are founded in 21st century.
Please modify only 'range_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
"""
from datetime import datetime
example_cities = {
'areaCode': ['916'],
'areaLand': 109271000.0,
'country': 'United States',
'elevation': 13.716,
'foundingDate': datetime(2000, 7, 1, 0, 0),
'governmentType': ['Council\u2013manager government'],
'homepage': ['http://elkgrovecity.org/'],
'isPartOf': ['California', u'Sacramento County California'],
'lat': 38.4383,
'leaderTitle': 'Chief Of Police',
'lon': -121.382,
'motto': 'Proud Heritage Bright Future',
'name': 'City of Elk Grove',
'population': 155937,
'postalCode': '95624 95757 95758 95759',
'timeZone': ['Pacific Time Zone'],
'utcOffset': ['-7', '-8']
}
def range_query():
# Modify the below line with your query.
# You can use datetime(year, month, day) to specify date in the query
query = {u'foundingDate' : {'$gte':datetime(2001, 1, 1, 0, 0)}}
return query
# Do not edit code below this line in the online code editor.
# Code here is for local use on your own computer.
def get_db():
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client.examples
return db
if __name__ == "__main__":
# For local use
db = get_db()
query = range_query()
cities = db.cities.find(query)
print "Found cities:", cities.count()
import pprint
pprint.pprint(cities[0])
| UTF-8 | Python | false | false | 1,849 | py | 175 | find_cities.py | 20 | 0.666847 | 0.620335 | 0 | 60 | 29.816667 | 83 |
girder/girder_worker | 7,593,502,212,909 | cc6bf6de475816430d3a38d9128de3b8b41129c2 | 8064af9cb8120b5ebdfcdcbb10547695e3c646d2 | /tests/test_utils.py | e920ce361c0e73db9fecb668dd7649101142da38 | [
"Apache-2.0"
] | permissive | https://github.com/girder/girder_worker | 5f7391c2ea41d953e9b7e380fdb74a0a38c21e2f | 19b4fc3360a0c9d92fbd0ecd1bfab693f8c75ae7 | refs/heads/master | 2022-11-20T10:41:26.072154 | 2022-11-17T13:27:31 | 2022-11-17T13:27:31 | 50,359,589 | 39 | 30 | Apache-2.0 | false | 2022-11-17T13:27:33 | 2016-01-25T15:24:13 | 2022-07-26T14:59:58 | 2022-11-17T13:27:32 | 3,024 | 33 | 25 | 73 | Python | false | false | import sys
from girder_worker.utils import TeeStdOutCustomWrite
def test_TeeStdOutCustomWrite(capfd):
nonlocal_ = {'data': ''}
def _append_to_data(message, **kwargs):
nonlocal_['data'] += message
with TeeStdOutCustomWrite(_append_to_data):
sys.stdout.write('Test String')
sys.stdout.flush()
assert nonlocal_['data'] == 'Test String'
out, err = capfd.readouterr()
assert out == 'Test String'
| UTF-8 | Python | false | false | 445 | py | 119 | test_utils.py | 73 | 0.649438 | 0.649438 | 0 | 18 | 23.722222 | 52 |
vincentiusmartin/chip2probe | 11,519,102,336,319 | 7a42016287f66fc18f2ffcd2578f2e6e44d697dd | f0076189d3e76cc4dc23f772147bc49282c7987f | /chip2probe/sitespredict/sequence_old.py | 42bdf81b4ec2fc4eaeaff15129fa474efb8b9a95 | [] | permissive | https://github.com/vincentiusmartin/chip2probe | fd3b6eeaeea69dc2730dac097a0692de8d2a20e0 | 6271b7ede0cacc8fea2b93798b46efb867971478 | refs/heads/master | 2022-05-01T23:33:18.812833 | 2022-03-30T21:33:57 | 2022-03-30T21:33:57 | 200,905,486 | 1 | 1 | MIT | false | 2020-06-22T22:53:30 | 2019-08-06T18:38:49 | 2020-06-22T01:05:13 | 2020-06-22T22:53:30 | 27,191 | 1 | 1 | 0 | Python | false | false | """
This file contains the BindingSite and Sequence classes.
Created on Jul 25, 2019
Authors: Vincentius Martin, Farica ZHuang
"""
import collections
import copy
class BindingSite(object):
"""Class for BindingSite object."""
def __init__(self, site_pos, site_start, site_end, core_start, core_end, core_mid,
sequence_in_site, protein, score=1, barrier=1):
"""
Initialize class variables for BindingSite object.
For kompas, site_start=core_start and site_end=core_end.
        For imads, they differ because site_start and site_end extend
        20bp to the left and right of the core.
"""
self.site_pos = site_pos
self.site_start = site_start
self.site_end = site_end
self.core_start = core_start
self.core_end = core_end
self.core_mid = core_mid
self.site_sequence = sequence_in_site
self.score = score
# bp from the core that shouldn't be mutated
self.barrier = barrier
self.protein = protein
def __str__(self):
"""Return the string representation of the BindingSite object."""
        return ("site_pos: {}, score: {}, site_start: {}, site_end: {}, "
                "core_start: {}, core_end: {}, site_sequence: {}, protein: {}"
                .format(self.site_pos, self.score, self.site_start,
                        self.site_end, self.core_start, self.core_end,
                        self.site_sequence, self.protein))
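# A minimal construction sketch for BindingSite (illustrative only: the
# coordinates, sequence, score, and protein name below are made up, not taken
# from real predictions):
#
#   site = BindingSite(site_pos=10, site_start=5, site_end=25,
#                      core_start=12, core_end=16, core_mid=14,
#                      sequence_in_site="ACGTACGTACGTACGTACGT",
#                      protein="ets1", score=1.5)
#   print(site)  # reports the site/core coordinates, sequence, and protein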
MutatedSequence = collections.namedtuple('MutatedSequence',
'sequence, \
escore_preds, \
model_preds, \
proteins, \
escore_cutoff, \
escore_gap, \
mutpos, \
plot_functions')
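# MutatedSequence bundles the outcome of a mutation pass: the mutated sequence,
# the e-score and model predictions for that sequence, the proteins involved,
# the e-score cutoff/gap settings, the mutated positions (mutpos), and the
# plot functions used for visualization.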
class Sequence(object):
"""Class for Sequence object."""
def __init__(self, escore_preds, model_preds, proteins, pbmescores, escore_cutoff=0.4, escore_gap=0):
"""
Initialize a sequence object.
        :param escore_preds: per-protein predictions from the Escore class; the
          sequence string is obtained from these objects.
        :param model_preds: per-protein core positions predicted by a model
          (kompas or imads). The quality of each prediction is assessed using
          'escore_preds'.
        :param proteins: list of proteins, of length 1 if homotypic and 2 if heterotypic
        :param pbmescores: per-protein PBM e-score predictors used to score 8-mers
          when sites are assessed or mutated
        :param escore_cutoff: a site is considered specific when there are 2
          consecutive positions with an e-score above this cutoff. E-scores range
          from -0.5 to 0.5; the default cutoff is 0.4.
        :param escore_gap: how many gaps (i.e. values below the cutoff) are permitted
          between specific e-score positions; 0 by default
"""
self.escore_preds = escore_preds
self.model_preds = model_preds
self.bsites = {}
self.proteins = proteins
self.escore_cutoff = escore_cutoff
self.pbmescores = pbmescores
self.escore_gap = escore_gap
seq = ""
        # determine the shared sequence and the binding sites for each protein
for protein in proteins:
# check that the sequences are the same for both proteins
if seq == "":
seq = escore_preds[protein].sequence
else:
assert seq == escore_preds[protein].sequence
self.bsites[protein] = self.get_bsite(self.escore_preds[protein],
self.model_preds[protein],
protein, escore_cutoff,
escore_gap)
self.sequence = seq
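    # A minimal usage sketch (hypothetical variable names; the per-protein
    # e-score predictions, model predictions, and PBM e-score objects are
    # assumed to be built elsewhere, e.g. by the Escore and kompas/imads
    # predictor classes):
    #
    #   seqobj = Sequence(escore_preds={"ets1": ets1_escore_pred},
    #                     model_preds={"ets1": ets1_model_pred},
    #                     proteins=["ets1"],
    #                     pbmescores={"ets1": ets1_pbmescore},
    #                     escore_cutoff=0.4, escore_gap=0)
    #   seqobj.bsites["ets1"]  # the BindingSite objects found for the protein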
def __str__(self):
"""Get string representation of Sequence object."""
return "Sequence object: {}\nsites {}".format(self.sequence, str(self.bsites))
def mutate_escore_seq_at_pos(self, seq, pos, threshold):
"""Return a mutated sequence that is nonspecific for all proteins."""
if len(seq) != 8:
raise Exception("sequence must be at length of 8")
nucleotides = "ACGT"
all_muts = {}
# get every mutation for the sequence at the given position
for n in nucleotides:
seqlist = list(seq)
# mutate the sequence
seqlist[pos] = n
mutseq = "".join(seqlist)
all_muts[mutseq] = {}
            # get escore of this mutation for each protein
for protein in self.proteins:
# get the escore for the mutated sequence
all_muts[mutseq][protein] = self.pbmescores[protein].predict_sequence(mutseq).predictions[0]['score']
# find a mutated sequence that is non-specific for all proteins
min_sum = float("inf")
min_seq = ""
# iterate through mutated sequences
for seq in all_muts:
# get escores
if all(i < threshold for i in all_muts[seq].values()):
if sum(all_muts[seq].values()) < min_sum:
min_sum = sum(all_muts[seq].values())
min_seq = seq
return min_seq
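    # Example (hypothetical 8-mer): mutate_escore_seq_at_pos("GGAAGTGG", 4, 0.3)
    # tries A/C/G/T at position 4 and returns the variant whose escore stays below
    # 0.3 for every protein, preferring the lowest summed escore ("" if none qualifies).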
def get_max_non_intersecting_escore(self, protein, full_seq, site_index):
"""
Get maxescore from given core that doesn't intersect a different core.
This is to prevent mutating other cores.
"""
# get the sequence we want to get the escore of
s_start = self.bsites[protein][site_index].site_start
s_end = self.bsites[protein][site_index].site_end
site_seq = full_seq[s_start:s_end]
# get the escore prediction for the sequence
epreds = self.pbmescores[protein].predict_sequence(site_seq).predictions
# initialize non intersecting max escore
max_escore = {"score": float("inf")}
# find non intersecting max escore
while max_escore["score"] == float("inf") and epreds:
# get 8mer in the given core with the highest escore
max_val = max(epreds, key=lambda x: x['score'])
# get position of this max escore
max_val_seqpos = self.bsites[protein][site_index].site_start + max_val['position']
            # check whether this 8-mer overlaps the protected region
            # (core plus barrier) of any other binding site
            intersects_other = False
            for curr_protein in self.bsites:
                # loop through each binding site of this protein
                for i in range(len(self.bsites[curr_protein])):
                    # skip if checking against itself
                    if i == site_index and curr_protein == protein:
                        continue
                    # region of this other site (core plus barrier) that must not be mutated
                    unmutated_start = self.bsites[curr_protein][i].core_start - self.bsites[curr_protein][i].barrier
                    unmutated_end = self.bsites[curr_protein][i].core_end + self.bsites[curr_protein][i].barrier
                    if unmutated_start <= max_val_seqpos < unmutated_end:
                        intersects_other = True
                        break
                if intersects_other:
                    break
            if intersects_other:
                # this max escore intersects another core, so discard it and retry
                epreds.remove(max_val)
            else:
                # highest escore that does not intersect any other site
                max_escore = copy.copy(max_val)
return max_escore
def eliminate_site(self, protein, sequence, site_index,
escore_threshold=0.3):
"""
Eliminate the given site from the sequence.
        sequence: the full sequence that contains the binding sites
        site_index: which site of the given protein to mutate
"""
maxescore = 1 # just to initialize with a large value
prevmax = -1 # just to avoid infinite loop
# list of mutated sites
site_mutpos = []
# sequence
full_sequence = str(sequence)
flag = True
while flag and prevmax != maxescore:
prevmax = float(maxescore)
maxepreds = self.get_max_non_intersecting_escore(protein=protein,
full_seq=full_sequence,
site_index=site_index)
maxescore = maxepreds['score']
# if the max non intersecting escore is below the threshold, nothing to mutate
if maxescore < escore_threshold:
flag = False
else:
# return immediately if the site can't be mutated
if maxescore == float("inf"): # no e-score that can be chosen
                    # since there is no site to be mutated, just return an empty mutation list
return full_sequence, []
seq_tomutate = maxepreds["escore_seq"]
midpos = len(seq_tomutate) // 2
# get new mutated sequence
mutated_escore_seq = self.mutate_escore_seq_at_pos(seq_tomutate, midpos, escore_threshold)
if mutated_escore_seq != "":
# mutate the sequence
mut_start = self.bsites[protein][site_index].site_start + maxepreds["start_idx"]
mut_end = mut_start + len(mutated_escore_seq)
full_sequence = full_sequence[:mut_start] + mutated_escore_seq + full_sequence[mut_end:]
site_mutpos.append(mut_start + midpos)
else:
full_sequence = ""
site_mutpos = []
# return the new mutated sequence and the positions mutated
return full_sequence, site_mutpos
def abolish_sites(self, sites, mode="to_eliminate",
escore_threshold=0.3):
"""
        sites: dict mapping each protein to a list of binding-site indices.
        mode: either "to_eliminate" (mutate the listed sites) or "to_keep"
              (mutate every site except the listed ones).
"""
# if we have multiple pbmescore and proteins to abolish
mutated = str(self.sequence)
for j in range(len(self.proteins)):
protein = self.proteins[j]
sites_to_mutate = []
if mode == "to_eliminate":
sites_to_mutate = sites[protein]
elif mode == "to_keep":
sites_to_mutate = [i for i in range(len(self.bsites[protein])) if i not in sites[protein]]
else:
                raise Exception("mode should be either to_eliminate or to_keep")
mutpos = []
for i in sites_to_mutate:
mutated, site_mutpos = self.eliminate_site(protein=protein, sequence=mutated,
site_index=i,
escore_threshold=escore_threshold)
mutpos.extend(site_mutpos)
functions = []
for pos in mutpos:
functions.append({"func": "axvline", "args": [pos],
"kwargs": {"color": "purple",
"linestyle": "dashed",
"linewidth": 1}})
return MutatedSequence(mutated, self.escore_preds, self.model_preds,
self.proteins, self.escore_cutoff,
self.escore_gap, mutpos, functions)
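    # Usage sketch (assumes `seq` is a Sequence with sites already detected):
    #   keep = seq.get_center_bsites()
    #   mutseq = seq.abolish_sites(keep, mode="to_keep",
    #                              escore_threshold=seq.escore_cutoff)
    # leaves only the centered pair and wipes every other specific site.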
# TODO: change to pbmescore.get_escores_specific
def get_bsite(self, escore_preds, model_preds, protein,
escore_cutoff=0.4, escore_gap=0):
"""
Get binding site objects.
        escore_gap: number of consecutive positions below the cutoff that are still tolerated within a specific window
"""
# initialize a list for the model predictions we want to keep
model_pred_keep = []
sequence = escore_preds.sequence
escores = escore_preds.predictions
signifcount = 0
startidx = -1
gapcount = 0
bindingsites = []
for i in range(0, len(escores)):
escoresite = escores[i]
if escoresite["score"] > escore_cutoff:
if signifcount == 0:
startidx = i
signifcount += 1
gapcount = 0
            # not a plain elif: the i == len(escores)-1 case below must still be handled
            if escoresite["score"] <= escore_cutoff and i != len(escores) - 1 and gapcount < escore_gap:
                # below the cutoff but still within the allowed gap
                gapcount += 1
elif escoresite["score"] <= escore_cutoff or i == len(escores) - 1:
if signifcount > 0:
# if we have found sufficient e-scores above the cutoff then get the binding sites
if signifcount >= 2:
# startpos: the start of binding
escore_bind = {"startpos": escores[startidx]['position'],
"escorelength": signifcount + gapcount,
"escore_startidx": escores[startidx]['start_idx']}
                        # we have a specific e-score stretch; now look for an overlapping model (imads/kompas) site
for model_pred in model_preds.predictions:
escore_start = escore_bind['startpos']
escore_end = escore_start + escore_bind["escorelength"]
core_start = model_pred["core_start"]
core_end = core_start + model_pred["core_width"]
if escore_start <= core_end and core_start <= escore_end: #overlap
                                    # a site is found; clamp site_start/site_end because flanking
                                    # regions were appended, so model coordinates can fall outside
                                    # the range [0, len(sequence))
site_start = max(0, model_pred["site_start"])
site_end = min(model_pred["site_start"] + model_pred["site_width"], len(sequence))
bsite = BindingSite(site_pos=(core_start + core_end) // 2,
score=model_pred["score"],
site_start=site_start,
site_end=site_end,
core_start=model_pred["core_start"],
core_mid=model_pred["core_mid"],
core_end=model_pred["core_start"] + model_pred["core_width"],
sequence_in_site=sequence[site_start:site_end],
protein=protein)
bindingsites.append(bsite)
# add this imads pred to the list of imads preds we
# want to keep
model_pred_keep.append(model_pred)
startidx = -1
signifcount = 0
gapcount = 0
self.model_preds[protein].predictions = model_pred_keep
# return the list of binding sites for this protein
return bindingsites
def sites_to_dict(self, bindingsites):
"""Put binding site objects into a dictionary of attributes."""
out_dict = {}
# loop through each list of binding site objects
for protein in bindingsites:
bs_list = bindingsites[protein]
# loop through each binding site object
for i in range(0, len(bs_list)):
attrs = [attr for attr in dir(bs_list[i])
if not callable(getattr(bs_list[i], attr)) \
and not attr.startswith("__")]
for attr in attrs:
out_dict["%s_%d" % (attr, i+1)] = getattr(bs_list[i], attr)
return out_dict
def get_sites_dist(self, site1=0, site2=1):
"""Get distance between two binding sites."""
if len(self.proteins) == 1:
protein = self.proteins[0]
return abs(self.bsites[protein][site2].core_mid - self.bsites[protein][site1].core_mid)
protein1 = self.proteins[0]
protein2 = self.proteins[1]
return abs(self.bsites[protein1][site1].core_mid - self.bsites[protein2][site1].core_mid)
def get_sites_dict(self):
"""Get dictionary of sites in this sequence."""
return self.sites_to_dict(self.bsites)
def site_exist(self):
"""Return true if there is at least 1 site in the sequence."""
        return self.site_count_all() != 0
def site_count_all(self):
"""Return the number of binding sites."""
tot = 0
for protein in self.proteins:
tot += self.site_count(protein)
return tot
def site_count(self, protein):
"""Get number of binding sites in the sequence for the given protein."""
return len(self.bsites[protein])
def get_center_bsites(self):
"""Find the indices of non-center binding sites."""
# get the lists of midpoints of binding sites for all proteins
midpoints = {}
for protein in self.proteins:
preds = self.model_preds[protein].predictions
midpoints[protein] = [(int(d['core_start']) + int(d['core_width'])/2) for d in preds]
min_dist = float('inf')
mid_lst1 = midpoints[self.proteins[0]]
if len(self.proteins) > 1:
mid_lst2 = midpoints[self.proteins[1]]
else:
mid_lst2 = midpoints[self.proteins[0]]
# find the pair centered binding sites
for i in range(len(mid_lst1)):
for j in range(len(mid_lst2)):
mid1 = mid_lst1[i]
mid2 = mid_lst2[j]
dist = (mid1 + mid2) / 2 - 18
if abs(dist) < min_dist:
min_dist = abs(dist)
keep_mid1 = i
keep_mid2 = j
# returns a dictionary of index of binding sites to keep for each protein
if len(self.proteins) > 1:
return {self.proteins[0]: [keep_mid1], self.proteins[1]: [keep_mid2]}
return {self.proteins[0]: [keep_mid1, keep_mid2]}
def are_centered(self):
"""Check if the pair of binding sites are centered."""
        # a centered pair requires exactly 2 binding sites in total
if self.site_count_all() != 2:
return False
# raise Exception("Number of binding sites found is {}. Should be 2."
# .format(self.site_count_all()))
# get the midpoints of binding sites for all proteins
midpoints = []
for protein in self.proteins:
preds = self.model_preds[protein].predictions
midpoints += [(d['core_start'] + d['core_width'] / 2) for d in preds]
dist = ((midpoints[0] + midpoints[1]) / 2) - 18
if abs(dist) > 3:
return False
return True
def remove_pos(self, pos):
"""
Return the indices of each protein after removing binding sites.
Binding site(s) removed are specified by pos.
pos: list of indices of binding sites to be removed from the sequence
Return: a dictionary of indices to mutate for each protein
"""
# make sure there are 2 binding sites left
if not self.is_valid():
raise Exception("Not a valid wild type")
if len(self.proteins) == 1:
return {self.proteins[0]: pos}
if pos == [0, 1]:
return {self.proteins[0]: [0], self.proteins[1]:[0]}
        # find the left-hand binding site
        if pos == [0]:
            if self.bsites[self.proteins[0]][0].core_start \
               < self.bsites[self.proteins[1]][0].core_start:
                return {self.proteins[0]: [0], self.proteins[1]: []}
            return {self.proteins[0]: [], self.proteins[1]: [0]}
        else:
            if self.bsites[self.proteins[0]][0].core_start \
               < self.bsites[self.proteins[1]][0].core_start:
                return {self.proteins[0]: [], self.proteins[1]: [0]}
            return {self.proteins[0]: [0], self.proteins[1]: []}
def is_valid(self):
"""
        Mutate the sequence, if needed, to make it valid.
        A valid sequence has exactly two centered binding sites.
        Return: True if the resulting sequence is valid, False otherwise
"""
# get number of proteins we are dealing with
num_prot = len(self.proteins)
# check base cases for homotypic cluster
if num_prot == 1:
if self.site_count_all() == 2 and self.are_centered():
return True
elif self.site_count_all() < 2:
return False
# check base cases for heterotypic cluster
elif num_prot == 2:
if self.site_count(self.proteins[0]) == self.site_count(self.proteins[1])==1 \
and self.are_centered():
return True
elif self.site_count(self.proteins[0]) == 0 \
or self.site_count(self.proteins[1]) == 0:
return False
# if there are more than 2 significant binding sites, mutate
else:
to_keep = self.get_center_bsites()
mut_seq = self.abolish_sites(to_keep, mode="to_keep",
escore_threshold=self.escore_cutoff)
# Update the current Sequence object with the valid, mutated sequence
self.__init__(mut_seq.escore_preds, mut_seq.model_preds,
proteins=mut_seq.proteins,
escore_cutoff=self.escore_cutoff,
escore_gap=self.escore_gap, pbmescores=self.pbmescores)
# check if mutation was successful
if ((len(self.proteins) == 1 and self.site_count_all() == 2) \
or (len(self.proteins) == 2 and self.site_count(self.proteins[0])==self.site_count(self.proteins[1])==1)) \
and self.are_centered():
return True
else:
return False
return False
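if __name__ == "__main__":
    # Minimal sketch of the BindingSite container on its own; the coordinates
    # and protein name below are hypothetical values, not real data.
    demo = BindingSite(site_pos=25, site_start=21, site_end=29,
                       core_start=21, core_end=29, core_mid=25,
                       sequence_in_site="GGAAGTGG", protein="ets1", score=1.7)
    print(demo)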
| UTF-8 | Python | false | false | 22,421 | py | 235 | sequence_old.py | 178 | 0.529236 | 0.521877 | 0 | 487 | 45.039014 | 122 |
hrishiwaikar/BE-Computer-Lab-3 | 6,098,853,576,603 | 3b10b382b622fc199acc6c88ceb28aab0a105df1 | e987a05008eaf26042c6b745aafa6bac8e1b0b5d | /Concurrent Quicksort/quicksort_parallel.py | 6ab7cb5d9f7b71def9982e7ba76bce52b998a9be | [] | no_license | https://github.com/hrishiwaikar/BE-Computer-Lab-3 | 49dad44bb48609b287b209f4fbfe6908f66bb382 | 0d222ded11216945e358a3f6279413b06a805217 | refs/heads/master | 2021-06-15T15:05:40.883563 | 2017-04-21T18:05:18 | 2017-04-21T18:05:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
import threading
import random
def quicksort(arr, left, right):
    """Concurrently sort arr[left..right] in place; each partition spawns two worker threads."""
    # artificial delay so the interleaving of the threads is observable
    time.sleep(0.3)
    thread_left = None
    thread_right = None
    if left < right:
        p = partition(arr, left, right)
        # sort the left half in its own thread
        thread_left = threading.Thread(target=lambda: quicksort(arr, left, p - 1))
        thread_left.start()
        # sort the right half in its own thread
        thread_right = threading.Thread(target=lambda: quicksort(arr, p + 1, right))
        thread_right.start()
    if thread_right is not None:
        thread_right.join()
    if thread_left is not None:
        thread_left.join()
def partition(arr,left,right):
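    """Partition arr[left..right] around the pivot arr[left]; return the pivot's final index."""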
pivot = left
l = left+1
r = right
while l<=r:
while l<=r and arr[l]<arr[pivot]:
l=l+1
while l<=r and arr[r]>arr[pivot]:
r=r-1
if l<=r:
temp = arr[l]
arr[l]=arr[r]
arr[r]=temp
else:
break
#exchange pivot with r
temp = arr[pivot]
arr[pivot]=arr[r]
arr[r]=temp
return r
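# Usage: quicksort(a, 0, len(a) - 1) sorts `a` in place; every recursive call with
# more than one element spawns two worker threads, so roughly O(n) short-lived
# threads are created for a list of n elements.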
if __name__ == "__main__":
    # demo driver: build 20 distinct random numbers, sort them concurrently, and report timing
    numbers = []
    for i in range(20):
        rno = random.randint(1, 100)
        while rno in numbers:
            rno = random.randint(1, 100)
        numbers.append(rno)
    print(time.ctime())
    print(numbers)
    quicksort(numbers, 0, len(numbers) - 1)
    print(time.ctime())
    print(numbers) | UTF-8 | Python | false | false | 1,547 | py | 24 | quicksort_parallel.py | 18 | 0.56755 | 0.553975 | 0 | 79 | 18.582278 | 87
NegatioN/ssd_keras | 13,073,880,499,979 | 24a802dce6032968b9260c956e939924f86e2709 | 2adb1a47984ba6d1a3fe1458ebf74cac84dc04f3 | /ssd_utils.py | 234e872d2e32bf7649f696151e3a2c6cfa4be612 | [
"MIT"
] | permissive | https://github.com/NegatioN/ssd_keras | 792cea019e7ab38d1d85061343a91d949fd52e59 | 45e7d76ccad463e5630af1cf18f9e964519575de | refs/heads/master | 2021-01-20T14:02:06.755091 | 2017-08-03T23:19:42 | 2017-08-03T23:19:42 | 90,547,120 | 1 | 1 | null | true | 2017-05-07T16:43:28 | 2017-05-07T16:43:28 | 2017-05-07T14:05:50 | 2017-03-26T15:14:09 | 3,270 | 0 | 0 | 0 | null | null | null | """Some utils for SSD."""
import numpy as np
import tensorflow as tf
from random import shuffle
from scipy.misc import imread
from scipy.misc import imresize
from keras.applications.imagenet_utils import preprocess_input
import os
from keras.preprocessing import image
from keras.optimizers import Adam
def img_to_array(img, dim_ordering='th'):
if dim_ordering not in ['th', 'tf']:
raise Exception('Unknown dim_ordering: ', dim_ordering)
# image has dim_ordering (height, width, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise Exception('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None):
'''Load an image into PIL format.
# Arguments
path: path to image file
grayscale: boolean
target_size: None (default to original size)
or (img_height, img_width)
'''
from PIL import Image
img = Image.open(path)
if grayscale:
img = img.convert('L')
    else:  # ensure 3 channels even when the loaded image is grayscale
img = img.convert('RGB')
if target_size:
img = img.resize((target_size[1], target_size[0]))
return img
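# Example usage of the two helpers above (the image path is hypothetical):
#   img = load_img("examples/cat.jpg", target_size=(300, 300))
#   x = img_to_array(img, dim_ordering='tf')  # float32 array of shape (300, 300, 3)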
class BBoxUtility(object):
"""Utility class to do some stuff with bounding boxes and priors.
# Arguments
num_classes: Number of classes excluding background.
priors: Priors and variances, numpy tensor of shape (num_priors, 8),
priors[i] = [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh].
overlap_threshold: Threshold to assign box to a prior.
        nms_thresh: IoU threshold used for non-maximum suppression (nms).
top_k: Number of total bboxes to be kept per image after nms step.
# References
https://arxiv.org/abs/1512.02325
"""
# TODO add setter methods for nms_thresh and top_K
def __init__(self, num_classes, priors=None, overlap_threshold=0.5,
nms_thresh=0.45, top_k=400):
        self.num_classes = num_classes + 1  # add one slot for the background class
self.priors = priors
self.num_priors = 0 if priors is None else len(priors)
self.overlap_threshold = overlap_threshold
self._nms_thresh = nms_thresh
self._top_k = top_k
self.boxes = tf.placeholder(dtype='float32', shape=(None, 4))
self.scores = tf.placeholder(dtype='float32', shape=(None,))
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
@property
def nms_thresh(self):
return self._nms_thresh
@nms_thresh.setter
def nms_thresh(self, value):
self._nms_thresh = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
@property
def top_k(self):
return self._top_k
@top_k.setter
def top_k(self, value):
self._top_k = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
def iou(self, box):
"""Compute intersection over union for the box with all priors.
# Arguments
box: Box, numpy tensor of shape (4,).
# Return
iou: Intersection over union,
numpy tensor of shape (num_priors).
"""
# compute intersection
inter_upleft = np.maximum(self.priors[:, :2], box[:2])
inter_botright = np.minimum(self.priors[:, 2:4], box[2:])
inter_wh = inter_botright - inter_upleft
inter_wh = np.maximum(inter_wh, 0)
inter = inter_wh[:, 0] * inter_wh[:, 1]
# compute union
area_pred = (box[2] - box[0]) * (box[3] - box[1])
area_gt = (self.priors[:, 2] - self.priors[:, 0])
area_gt *= (self.priors[:, 3] - self.priors[:, 1])
union = area_pred + area_gt - inter
# compute iou
iou = inter / union
return iou
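    # Worked example: for a prior covering [0, 0, 1, 1] and a box [0.5, 0.5, 1, 1],
    # the intersection area is 0.25 and the union is 1.0 + 0.25 - 0.25 = 1.0,
    # so the returned IoU is 0.25.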
def encode_box(self, box, return_iou=True):
"""Encode box for training, do it only for assigned priors.
# Arguments
box: Box, numpy tensor of shape (4,).
return_iou: Whether to concat iou to encoded values.
# Return
encoded_box: Tensor with encoded box
numpy tensor of shape (num_priors, 4 + int(return_iou)).
"""
iou = self.iou(box)
encoded_box = np.zeros((self.num_priors, 4 + return_iou))
assign_mask = iou > self.overlap_threshold
if not assign_mask.any():
assign_mask[iou.argmax()] = True
if return_iou:
encoded_box[:, -1][assign_mask] = iou[assign_mask]
assigned_priors = self.priors[assign_mask]
box_center = 0.5 * (box[:2] + box[2:])
box_wh = box[2:] - box[:2]
assigned_priors_center = 0.5 * (assigned_priors[:, :2] +
assigned_priors[:, 2:4])
assigned_priors_wh = (assigned_priors[:, 2:4] -
assigned_priors[:, :2])
# we encode variance
encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center
encoded_box[:, :2][assign_mask] /= assigned_priors_wh
encoded_box[:, :2][assign_mask] /= assigned_priors[:, -4:-2]
encoded_box[:, 2:4][assign_mask] = np.log(box_wh /
assigned_priors_wh)
encoded_box[:, 2:4][assign_mask] /= assigned_priors[:, -2:]
return encoded_box.ravel()
def assign_boxes(self, boxes):
"""Assign boxes to priors for training.
# Arguments
boxes: Box, numpy tensor of shape (num_boxes, 4 + num_classes),
num_classes without background.
# Return
assignment: Tensor with assigned boxes,
numpy tensor of shape (num_boxes, 4 + num_classes + 8),
priors in ground truth are fictitious,
assignment[:, -8] has 1 if prior should be penalized
or in other words is assigned to some ground truth box,
assignment[:, -7:] are all 0. See loss for more details.
"""
assignment = np.zeros((self.num_priors, 4 + self.num_classes + 8))
assignment[:, 4] = 1.0
if len(boxes) == 0:
return assignment
encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes[:, :4])
encoded_boxes = encoded_boxes.reshape(-1, self.num_priors, 5)
best_iou = encoded_boxes[:, :, -1].max(axis=0)
best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0)
best_iou_mask = best_iou > 0
best_iou_idx = best_iou_idx[best_iou_mask]
assign_num = len(best_iou_idx)
encoded_boxes = encoded_boxes[:, best_iou_mask, :]
assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx,
np.arange(assign_num),
:4]
assignment[:, 4][best_iou_mask] = 0
assignment[:, 5:-8][best_iou_mask] = boxes[best_iou_idx, 4:]
assignment[:, -8][best_iou_mask] = 1
return assignment
def decode_boxes(self, mbox_loc, mbox_priorbox, variances):
"""Convert bboxes from local predictions to shifted priors.
# Arguments
mbox_loc: Numpy array of predicted locations.
mbox_priorbox: Numpy array of prior boxes.
variances: Numpy array of variances.
# Return
decode_bbox: Shifted priors.
"""
prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]
prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]
prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])
prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])
decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]
decode_bbox_center_x += prior_center_x
decode_bbox_center_y = mbox_loc[:, 1] * prior_width * variances[:, 1]
decode_bbox_center_y += prior_center_y
decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2])
decode_bbox_width *= prior_width
decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3])
decode_bbox_height *= prior_height
decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width
decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height
decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width
decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height
decode_bbox = np.concatenate((decode_bbox_xmin[:, None],
decode_bbox_ymin[:, None],
decode_bbox_xmax[:, None],
decode_bbox_ymax[:, None]), axis=-1)
decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)
return decode_bbox
def detection_out(self, predictions, background_label_id=0, keep_top_k=200,
confidence_threshold=0.01):
"""Do non maximum suppression (nms) on prediction results.
# Arguments
predictions: Numpy array of predicted values.
num_classes: Number of classes for prediction.
background_label_id: Label of background class.
keep_top_k: Number of total bboxes to be kept per image
after nms step.
confidence_threshold: Only consider detections,
whose confidences are larger than a threshold.
# Return
results: List of predictions for every picture. Each prediction is:
[label, confidence, xmin, ymin, xmax, ymax]
"""
mbox_loc = predictions[:, :, :4]
variances = predictions[:, :, -4:]
mbox_priorbox = predictions[:, :, -8:-4]
mbox_conf = predictions[:, :, 4:-8]
results = []
for i in range(len(mbox_loc)):
results.append([])
decode_bbox = self.decode_boxes(mbox_loc[i],
mbox_priorbox[i], variances[i])
for c in range(self.num_classes):
if c == background_label_id:
continue
c_confs = mbox_conf[i, :, c]
c_confs_m = c_confs > confidence_threshold
if len(c_confs[c_confs_m]) > 0:
boxes_to_process = decode_bbox[c_confs_m]
confs_to_process = c_confs[c_confs_m]
feed_dict = {self.boxes: boxes_to_process,
self.scores: confs_to_process}
idx = self.sess.run(self.nms, feed_dict=feed_dict)
good_boxes = boxes_to_process[idx]
confs = confs_to_process[idx][:, None]
labels = c * np.ones((len(idx), 1))
c_pred = np.concatenate((labels, confs, good_boxes),
axis=1)
results[-1].extend(c_pred)
if len(results[-1]) > 0:
results[-1] = np.array(results[-1])
argsort = np.argsort(results[-1][:, 1])[::-1]
results[-1] = results[-1][argsort]
results[-1] = results[-1][:keep_top_k]
return results
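    # Typical inference flow (a sketch; `model`, `priors`, and `inputs` are assumed
    # to come from the SSD model and its pickled prior boxes elsewhere in this repo):
    #   bbox_util = BBoxUtility(num_classes=20, priors=priors)  # background excluded, see docstring
    #   preds = model.predict(preprocess_input(inputs))
    #   results = bbox_util.detection_out(preds, confidence_threshold=0.5)
    #   # each row of results[i] is [label, confidence, xmin, ymin, xmax, ymax]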
class Generator(object):
def __init__(self, gt, bbox_util,
batch_size, path_prefix,
train_keys, val_keys, image_size,
saturation_var=0.5,
brightness_var=0.5,
contrast_var=0.5,
lighting_std=0.5,
hflip_prob=0.5,
vflip_prob=0.5,
do_crop=True,
crop_area_range=[0.75, 1.0],
aspect_ratio_range=[3./4., 4./3.]):
self.gt = gt
self.bbox_util = bbox_util
self.batch_size = batch_size
self.path_prefix = path_prefix
self.train_keys = train_keys
self.val_keys = val_keys
self.train_batches = len(train_keys)
self.val_batches = len(val_keys)
self.image_size = image_size
self.color_jitter = []
if saturation_var:
self.saturation_var = saturation_var
self.color_jitter.append(self.saturation)
if brightness_var:
self.brightness_var = brightness_var
self.color_jitter.append(self.brightness)
if contrast_var:
self.contrast_var = contrast_var
self.color_jitter.append(self.contrast)
self.lighting_std = lighting_std
self.hflip_prob = hflip_prob
self.vflip_prob = vflip_prob
self.do_crop = do_crop
self.crop_area_range = crop_area_range
self.aspect_ratio_range = aspect_ratio_range
def grayscale(self, rgb):
return rgb.dot([0.299, 0.587, 0.114])
def saturation(self, rgb):
gs = self.grayscale(rgb)
alpha = 2 * np.random.random() * self.saturation_var
alpha += 1 - self.saturation_var
rgb = rgb * alpha + (1 - alpha) * gs[:, :, None]
return np.clip(rgb, 0, 255)
def brightness(self, rgb):
alpha = 2 * np.random.random() * self.brightness_var
        alpha += 1 - self.brightness_var
rgb = rgb * alpha
return np.clip(rgb, 0, 255)
def contrast(self, rgb):
gs = self.grayscale(rgb).mean() * np.ones_like(rgb)
alpha = 2 * np.random.random() * self.contrast_var
alpha += 1 - self.contrast_var
rgb = rgb * alpha + (1 - alpha) * gs
return np.clip(rgb, 0, 255)
def lighting(self, img):
cov = np.cov(img.reshape(-1, 3) / 255.0, rowvar=False)
eigval, eigvec = np.linalg.eigh(cov)
noise = np.random.randn(3) * self.lighting_std
noise = eigvec.dot(eigval * noise) * 255
img += noise
return np.clip(img, 0, 255)
def horizontal_flip(self, img, y):
if np.random.random() < self.hflip_prob:
img = img[:, ::-1]
y[:, [0, 2]] = 1 - y[:, [2, 0]]
return img, y
def vertical_flip(self, img, y):
if np.random.random() < self.vflip_prob:
img = img[::-1]
y[:, [1, 3]] = 1 - y[:, [3, 1]]
return img, y
def random_sized_crop(self, img, targets):
img_w = img.shape[1]
img_h = img.shape[0]
img_area = img_w * img_h
random_scale = np.random.random()
random_scale *= (self.crop_area_range[1] -
self.crop_area_range[0])
random_scale += self.crop_area_range[0]
target_area = random_scale * img_area
random_ratio = np.random.random()
random_ratio *= (self.aspect_ratio_range[1] -
self.aspect_ratio_range[0])
random_ratio += self.aspect_ratio_range[0]
w = np.round(np.sqrt(target_area * random_ratio))
h = np.round(np.sqrt(target_area / random_ratio))
if np.random.random() < 0.5:
w, h = h, w
w = min(w, img_w)
w_rel = w / img_w
w = int(w)
h = min(h, img_h)
h_rel = h / img_h
h = int(h)
x = np.random.random() * (img_w - w)
x_rel = x / img_w
x = int(x)
y = np.random.random() * (img_h - h)
y_rel = y / img_h
y = int(y)
img = img[y:y+h, x:x+w]
new_targets = []
for box in targets:
cx = 0.5 * (box[0] + box[2])
cy = 0.5 * (box[1] + box[3])
if (x_rel < cx < x_rel + w_rel and
y_rel < cy < y_rel + h_rel):
xmin = (box[0] - x_rel) / w_rel
ymin = (box[1] - y_rel) / h_rel
xmax = (box[2] - x_rel) / w_rel
ymax = (box[3] - y_rel) / h_rel
xmin = max(0, xmin)
ymin = max(0, ymin)
xmax = min(1, xmax)
ymax = min(1, ymax)
box[:4] = [xmin, ymin, xmax, ymax]
new_targets.append(box)
new_targets = np.asarray(new_targets).reshape(-1, targets.shape[1])
return img, new_targets
def generate(self, train=True):
while True:
if train:
shuffle(self.train_keys)
keys = self.train_keys
else:
shuffle(self.val_keys)
keys = self.val_keys
inputs = []
targets = []
for key in keys:
img_path = self.path_prefix + key
img = imread(img_path).astype('float32')
y = np.array(self.gt[key].copy())
if train and self.do_crop:
img, y = self.random_sized_crop(img, y)
img = imresize(img, self.image_size).astype('float32')
                if img.shape != self.image_size + (3,):  # skip images that are not 3-channel RGB after resizing
continue
if train:
shuffle(self.color_jitter)
for jitter in self.color_jitter:
img = jitter(img)
if self.lighting_std:
img = self.lighting(img)
if self.hflip_prob > 0:
img, y = self.horizontal_flip(img, y)
if self.vflip_prob > 0:
img, y = self.vertical_flip(img, y)
y = self.bbox_util.assign_boxes(y)
inputs.append(img)
targets.append(y)
if len(targets) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = np.array(targets)
inputs = []
targets = []
yield preprocess_input(tmp_inp), tmp_targets
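    # Usage sketch (the ground-truth dict `gt`, the key lists, and the image path are assumptions):
    #   gen = Generator(gt, bbox_util, 16, 'VOCdevkit/VOC2007/JPEGImages/',
    #                   train_keys, val_keys, (300, 300), do_crop=False)
    #   model.fit_generator(gen.generate(True), gen.train_batches, ...)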
class UnseenImagesGenerator(image.Iterator):
def __init__(self, directory,
target_size=(256, 256), color_mode='rgb',
dim_ordering='th',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None):
self.directory = directory
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.dim_ordering = dim_ordering
if self.color_mode == 'rgb':
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
# first, count the number of samples and classes
self.nb_sample = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.nb_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in sorted(os.listdir(subpath)):
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.nb_sample += 1
print('Found %d images belonging to %d classes.' % (self.nb_sample, self.nb_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.nb_sample,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in sorted(os.listdir(subpath)):
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
self.filenames.append(os.path.join(subdir, fname))
i += 1
super(UnseenImagesGenerator, self).__init__(self.nb_sample, batch_size, shuffle, seed)
def next(self):
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape)
filenames = []
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
filenames.append(fname)
img = load_img(os.path.join(self.directory, fname), grayscale=grayscale, target_size=self.target_size)
x = img_to_array(img, dim_ordering=self.dim_ordering)
batch_x[i] = x
# build batch of labels
return preprocess_input(batch_x), filenames
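    # Usage sketch (the directory layout is an assumption: one subfolder per class):
    #   unseen = UnseenImagesGenerator('unseen_pics', target_size=(300, 300),
    #                                  dim_ordering='tf', batch_size=8)
    #   batch_x, filenames = unseen.next()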
# Create priors for BBoxUtility to work with, consistent with the live model.
def create_bbox_with_priors(model, num_classes, target_size):
gen = image.ImageDataGenerator()
batches = gen.flow_from_directory("pics", target_size=target_size, class_mode="categorical", shuffle=True, batch_size=1)
model.compile(Adam(lr=0.001), loss= 'categorical_crossentropy', metrics=['accuracy'])
priors_format = model.predict_generator(batches, batches.samples)[0, :, -8:]
return BBoxUtility(num_classes, priors_format) | UTF-8 | Python | false | false | 23,024 | py | 9 | ssd_utils.py | 6 | 0.528101 | 0.514246 | 0 | 548 | 41.016423 | 124 |
kidino/mydev | 7,911,329,769,020 | 0b538229347bded92c7b6639504550d69c8dbb0e | 5133beb9ccf2bc3e968b7f30d8b98a83ef9d02a6 | /site/pelicanconf.py | 9c1983101f97085c8ca9b2a7ed089f6a4f4a6e50 | [] | no_license | https://github.com/kidino/mydev | 5e6369f8e01118fcb937c430e4ee1f5d5ebd5213 | adf6ad3dd337d507e1122d586e67fbd57d424924 | refs/heads/master | 2020-11-30T13:02:22.253231 | 2013-07-14T22:17:41 | 2013-07-14T22:17:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
import os
HERE = os.path.abspath(os.path.dirname(__file__))
PATH = os.path.join(HERE, 'content')
AUTHOR = u'team@mydev.my'
SITENAME = u'Laman Kolaboratif MyDev'
SITEURL = 'http://www.mydev.my'
RELATIVE_URLS = False
TIMEZONE = 'Asia/Kuala_Lumpur'
DEFAULT_LANG = u'en'
# Blogroll - This is required
LINKS = (('Pelican', 'http://docs.notmyidea.org/alexis/pelican/'),
('Python.org', 'http://python.org'),
('Jinja2', 'http://jinja.pocoo.org'),
('You can modify those links in your config file', '#'),)
# Social widget - This is required
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = 5
MD_EXTENSIONS = ['toc', 'codehilite', 'extra']
| UTF-8 | Python | false | false | 791 | py | 3 | pelicanconf.py | 2 | 0.621997 | 0.618205 | 0 | 28 | 27.25 | 68 |