Each data row below gives the following columns in this order, with the source file text in the `content` column (⌀ marks columns that contain null values):

| column | dtype | lengths / range / classes |
|---|---|---|
| repo_name | string | lengths 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | string | lengths 40 – 40 |
| directory_id | string | lengths 40 – 40 |
| path | string | lengths 5 – 151 |
| content_id | string | lengths 40 – 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 – 130 |
| snapshot_id | string | lengths 40 – 40 |
| revision_id | string | lengths 40 – 40 |
| branch_name | string | lengths 4 – 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k – 687M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 – 10.2M ⌀ |
| gha_stargazers_count | int32 | 0 – 178k ⌀ |
| gha_forks_count | int32 | 0 – 88.9k ⌀ |
| gha_open_issues_count | int32 | 0 – 2.72k ⌀ |
| gha_language | string | lengths 1 – 16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 – 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 – 202k |
| filename | string | lengths 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |
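As a quick orientation to the schema, here is a minimal sketch of filtering such a dump with pandas. The Parquet filename is hypothetical; only the column names listed above are assumed.

```python
# Sketch only: assumes a local Parquet export named "code_dump.parquet"
# whose columns match the schema table above.
import pandas as pd

df = pd.read_parquet("code_dump.parquet")

# Keep hand-written, reasonably small Python files.
small = df[
    (df["language"] == "Python")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["num_lines"] < 200)
]
print(small[["repo_name", "path", "num_lines", "avg_line_length"]].head())
```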
unknownboyy/GUVI | 2,293,512,539,827 | 32cbac996b68c775bed6fc8dd8b1a4e8e120ae3a | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /temp2.py | fa4f70167eab54a948ac4e5778896222ead255f2 | []
| no_license | https://github.com/unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
# Recursion
def fibo1(n):
if n<=1:
return n
return fibo1(n-2)+fibo1(n-1)
n = 33
f = [-1]*(n+1)
call = 0
# Memoization
def fibo2(n):
if f[n]==-1:
if n<=1: return n
else: f[n]=fibo2(n-2)+fibo2(n-1)
return f[n]
# Tabulation
def fibo3(n):
ff = [0,1]
for i in range(2,n+1):
ff.append(ff[-1]+ff[-2])
return ff[n]
start = time()
fibo1(n)
lap1 = time()
fibo2(n)
lap2 = time()
fibo3(n)
end = time()
print('Time in Recursion',lap1-start)
print('Time in Memoization',lap2-lap1)
print('Time in Tabulation',end-lap2) | UTF-8 | Python | false | false | 595 | py | 358 | temp2.py | 335 | 0.571429 | 0.514286 | 0 | 36 | 15.555556 | 42 |
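The file above times plain recursion, hand-rolled memoization, and tabulation. For comparison, a small sketch of the same memoization idea using the standard library's `functools.lru_cache`; nothing here comes from the original repository.

```python
# Sketch: memoized Fibonacci via the standard library.
from functools import lru_cache
from time import time

@lru_cache(maxsize=None)
def fibo_cached(n):
    if n <= 1:
        return n
    return fibo_cached(n - 2) + fibo_cached(n - 1)

start = time()
fibo_cached(33)
print('Time with lru_cache', time() - start)
```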
Vincc/snakeReinforcementLearning | 15,659,450,774,219 | 52b88341a8ca86925dc2684b9c0456addb4cd5fb | 4cf20e1247223281fb36c21a1dda8a921ba22647 | /SnakeEnv/snake_Env/envs/snake_env.py | b7efbab5890e7439ffa566b0459027d2bc8a3345 | []
| no_license | https://github.com/Vincc/snakeReinforcementLearning | 8cc73f7a57816cc0cf293efe294145b9e3670b5e | 04fb0913f7fa257f7ebb43884adb8223cf5db155 | refs/heads/master | 2022-12-31T15:58:05.539107 | 2020-10-28T04:11:22 | 2020-10-28T04:11:22 | 301,163,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.spaces import Discrete, Box
from gym.envs.classic_control import rendering
import pygame
from time import sleep
from random import randint
import sys
import numpy as np
class SnakeEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self):
self.size = 500
self.numcell = 50
self.gamespeed = 0.05
pygame.init()
self.done = False
self.screen = pygame.display.set_mode((self.size, self.size))
self.carl = [(1, 1)]
self.direc = (0, 0)
self.foodp = False
self.ap = False
self.food = None
self.reward = 0
self.state = []
self.add = []
self.viewer = None
self.discrete_actions = [0, 1, 2, 3]
self.action_space = Discrete(len(self.discrete_actions))
self.observation_space = Box(low=0, high=255, dtype=np.uint8 ,shape=(
self.size, self.size, 3))
def getpos(self, pos):
return (pos - 1) * (self.size / self.numcell)
def step(self, action):
self.screen.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("Over")
return [self.get_state(), self.reward, self.done]
# set direction
if action == 0:
self.direc = (-1, 0)
elif action == 1:
self.direc = (1, 0)
elif action == 2:
self.direc = (0, -1)
elif action == 3:
self.direc = (0, 1)
        # check presence of food and spawn food if needed
if not self.foodp:
self.food = (randint(0, self.numcell), randint(0, self.numcell))
self.foodp = True
# draw food
pygame.draw.rect(self.screen, (255, 0, 0), (self.getpos(self.food[0]), self.getpos(self.food[1]), self.size / self.numcell, self.size / self.numcell))
# run snake updates
self.carl.insert(0, tuple(map(sum, zip(self.carl[0], self.direc))))
if not self.ap:
self.carl.pop()
        self.ap = False  # reset the growth flag after the tail update
# draw carl
for i in self.carl:
pygame.draw.rect(self.screen, (255, 255, 255), (self.getpos(i[0]), self.getpos(i[1]), self.size / self.numcell, self.size / self.numcell))
# check death
if self.getpos(self.carl[0][1]) < 0 or self.getpos(self.carl[0][0]) < 0 or self.getpos(self.carl[0][1]) > self.size or self.getpos(self.carl[0][0]) > self.size or \
self.carl[0] in self.carl[1:len(self.carl)]:
self.done = True
self.reward -= 100
print("Over")
self.reset()
# check food after position update
if self.carl[0] == self.food:
self.reward += 100
self.ap = True
self.foodp = False
sleep(self.gamespeed)
pygame.display.update()
return [self.get_state(), self.reward, self.done]
def get_state(self):
state = np.fliplr(np.flip(np.rot90(pygame.surfarray.array3d(pygame.display.get_surface()).astype(np.uint8)),axis=0))
return state
def reset(self):
self.screen.fill((0, 0, 0))
self.done = False
self.carl = [(1, 1)]
self.direc = (0, 0)
self.foodp = False
self.ap = False
self.food = None
for i in self.carl:
pygame.draw.rect(self.screen, (255, 255, 255), (self.getpos(i[0]), self.getpos(i[1]), self.size / self.numcell, self.size / self.numcell))
pygame.display.update()
return self.get_state()
def render(self, mode='human', close=False):
img = self.get_state()
if mode == 'human':
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
elif mode == 'rgb_array':
return img
def sample(self):
return np.random.choice(self.discrete_actions) | UTF-8 | Python | false | false | 4,011 | py | 7 | snake_env.py | 6 | 0.557218 | 0.531538 | 0 | 112 | 34.821429 | 172 |
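For context, a hedged sketch of driving the environment above with random actions. It assumes the package is importable under the path shown in the row's metadata, and it follows this file's non-standard contract: `step()` returns a three-item list rather than the usual Gym four-tuple.

```python
# Sketch only: exercising SnakeEnv with random actions.
# Import path mirrors SnakeEnv/snake_Env/envs/snake_env.py and may need adjusting.
from snake_Env.envs.snake_env import SnakeEnv

env = SnakeEnv()
state = env.reset()
for _ in range(200):
    action = env.sample()                    # random discrete action
    state, reward, done = env.step(action)   # this env returns [state, reward, done]
    if done:
        state = env.reset()
```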
gleekzorp/pyclinic | 11,879,879,542,769 | d78d6c0de870d3b1468c5eddae73d7ae9059f9a6 | 58f7cdfd4217c5199ce244ee68adfeee2339810d | /pyclinic/openapi.py | 9a38520228584d0f8411c31827a025875f8686dd | [
"MIT"
]
| permissive | https://github.com/gleekzorp/pyclinic | 00fb7c6bd0ac2b00d01f9cd3a38c7fe3ad19139c | 4e95d0a49b536bc08884b74668b833ab03c69c2a | refs/heads/main | 2023-07-12T20:31:32.003003 | 2021-08-18T19:35:49 | 2021-08-18T19:35:49 | 395,820,298 | 0 | 0 | MIT | true | 2021-08-13T22:54:45 | 2021-08-13T22:54:45 | 2021-08-13T16:27:36 | 2021-08-13T16:27:34 | 157 | 0 | 0 | 0 | null | false | false | import os
import re
import json
import logging
from typing import Dict, List, Optional, Tuple
import yaml
import requests
from rich.console import Console
from pyclinic.normalize import normalize_class_name, normalize_function_name
console = Console()
_logger = logging.getLogger(__name__)
METHODS = [
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
]
def load_openapi_spec_from_file(path) -> Dict:
if not os.path.exists(path):
raise FileNotFoundError(path)
with open(path, "r") as f:
if path.endswith(".yaml") or path.endswith(".yml"):
return yaml.load(f, Loader=yaml.FullLoader)
elif path.endswith(".json"):
return json.load(f)
else:
raise ValueError("Unsupported file format: {}".format(path))
def extract_base_url_from_spec(spec: Dict) -> Tuple[str, Dict]:
"""Extract the server urls from the spec to be used when generating functions.
Args:
spec: The OpenAPI Spec dictionary to extract the urls from.
Returns:
A tuple of the first BASE_URL (as default) and a variables dictionary that sets the BASE_URL.
Exceptions:
ValueError: If the spec has no URLs in its "servers" section.
"""
if len(spec["servers"]) == 0:
raise ValueError("No server URLs found in spec")
base_urls = [s["url"] for s in spec["servers"]]
variables = {"BASE_URL": base_urls[0]}
return base_urls[0], variables
def find_requests(spec: Dict) -> Dict:
"""
Returns a Dict like:
{
'pets': {
'create_a_pet': {'method': 'POST', 'url': 'http://petstore.swagger.io/v1/pets'},
'info_for_a_pet': {'method': 'GET', 'url': 'http://petstore.swagger.io/v1/pets/:petId'},
'list_pets': {'method': 'GET', 'url': 'http://petstore.swagger.io/v1/pets?limit=-71686804'},
},
}
"""
url, _ = extract_base_url_from_spec(spec)
paths = spec["paths"]
folders = {}
for path in paths.keys():
folder_name = path.split("/")[-1]
folder_name = normalize_class_name(folder_name)
folders[folder_name] = {}
for method in [k for k in paths[path].keys() if k in METHODS]:
function_name = normalize_function_name(paths[path][method]["summary"])
folders[folder_name][function_name] = {"method": method.upper(), "url": url + path}
return folders
# def replace_variables_with_actual_values(folders: Dict, variables: Dict) -> Dict:
# """Replace OpenApi Variables in the given object with the actual values found in the variables dictionary."""
# OPENAPI_VARIABLE = r"\{([^}]+)\}" # {variable}
# # TODO: Find a way to do this without 4 for loops
# for folder_name, request in folders.items():
# for request_name, request_value in request.items():
# for key, value in request_value.items():
# for found_var in re.findall(OPENAPI_VARIABLE, string=value):
# found_value = variables.get(found_var)
# if found_value is None:
# _logger.error(f"Variable <{found_var}> returned a None value")
# found_value = "VARIABLE_VALUE_NOT_FOUND"
# else:
# pass
# request_value[key] = value.replace("{" + found_var + "}", variables.get(found_var))
# return folders
def build_request_to_send(request: Dict, variables: Dict) -> Dict:
"""
{
"method": "GET",
"url": "{BASE_URL}/Accounts/v1",
}
"""
OPENAPI_VARIABLE = r"\{([^}]+)\}" # {variable}
for key, value in request.items():
for found_var in re.findall(OPENAPI_VARIABLE, string=value):
found_value = variables.get(found_var)
if found_value is None:
_logger.warning(f"Variable {{{found_var}}} was not found in the Variables provided")
else:
request[key] = value.replace("{" + found_var + "}", str(found_value))
return request
class OpenApiExecutableRequest:
def __init__(self, name: str, request: Dict):
self.name = name
self.request = request
def __call__(self, variables: Dict = None, **kwargs):
if kwargs:
self.request.update(kwargs)
if variables:
self.request = build_request_to_send(self.request, variables)
response = requests.request(**self.request)
return response
def help(self):
print(), console.rule(f"[bold green]Request Dictionary for {self.name}[/bold green]")
console.print(self.request)
class OpenApiFolder:
def __init__(self, folder_name: str, folder: Dict[str, OpenApiExecutableRequest]):
"""A flat dictionary with request names as keys and their executables as the values."""
self.name = folder_name
self.folder = folder
def __getattr__(self, name):
if name not in self.folder:
raise ValueError("OpenApi Spec doesn't have a request with the summary of: " + name)
return self.folder[name]
def help(self) -> List[str]:
"""Display all of the executable functions within this folder."""
print(), console.rule(f"[bold green]Function Names in {self.name} Path[/bold green]")
if len(self.folder) == 0:
console.print("No functions found!", style="bold red")
return
keys = [key for key in self.folder.keys()]
for i, key in enumerate(keys):
console.print(f"{str(i+1):5s} [light_sky_blue1]{key}[/light_sky_blue1]", style="white")
console.print("\nExample Usage", style="bold u gold1")
console.print(f"\nresponse = runner.{self.name}.{keys[-1]}()")
return keys
def map_requests_to_executable_functions(folders: Dict, variables: Dict) -> Dict:
"""Map requests to executable functions to be used by OpenApi runner.
Returns a Dict like:
{
"Pets": OpenApiFolder({
"list_all_pets": OpenApiExecutableRequest(request_to_send),
"create_a_pet": OpenApiExecutableRequest(request_to_send),
}),
"PetId": OpenApiFolder({
"info_for_a_pet": OpenApiExecutableRequest(request_to_send)
}),
}
runner = OpenApiRunner(spec_path, variables)
runner.Pets.list_all_pets()
runner.PetId.info_for_a_pet(pet_id)
"""
for folder_name in folders.keys():
folder = folders[folder_name]
for request_name in folder.keys():
request = build_request_to_send(folder[request_name], variables)
folder[request_name] = OpenApiExecutableRequest(request_name, request)
folders[folder_name] = OpenApiFolder(folder_name, folder)
return folders
class OpenApi:
"""Instance of a OpenAPI Spec Runner with executable request functions.
Args:
spec_path: The path to the OpenApi spec file (.yaml | .yml | .json)
user_variables: An optional dict of variables to be used by OpenApi
Format:
OpenApi.OpenApiFolderRequest.OpenApiExecutableRequest(**kwargs)
Examples:
# Instantiate a OpenApi object
runner = OpenApi(spec_path, user_variables={"USERNAME": "foo", "PASSWORD": "bar"})
# Then call the request function
runner.Pets.list_all_pets()
runner.Accounts.create_a_user(data={"username": "johndoe", "password": "secret"})
* You can override the Request that the endpoint function sends. Here are some common options:
headers: A list of headers to send with the request.
method: The HTTP method to use.
url: The URL to send the request to.
params: A dictionary of parameters to send with the request.
data: The body of the request.
"""
def __init__(
self,
spec_path: str,
user_variables: Optional[Dict] = None,
):
self.spec = load_openapi_spec_from_file(spec_path)
self.title = self.spec["info"]["title"]
self.variables = user_variables or {}
self.folders = self.__load()
def __load(self):
reqs = find_requests(self.spec)
return map_requests_to_executable_functions(reqs, self.variables)
def __getattr__(self, name):
if name not in self.folders:
raise ValueError("OpenApi Spec doesn't have a path named: " + name)
folder = self.folders[name]
return folder
def show_folders(self):
"""Display all folders and functions found in this Postman Collection."""
print(), console.rule(f"[bold green]Folders (aka Paths) found in {self.title} Postman Collection[/bold green]")
if len(self.folders) == 0:
console.print("No folders found!", style="bold red")
return
folder_names = list(self.folders.keys())
for folder_name in folder_names:
display = f"\n{folder_name}." if folder_name != "Root" else "\n*Root."
console.print(display, style="bold light_green")
for request_name in sorted(self.folders[folder_name].folder.keys()):
console.print(f"\t{request_name}", style="light_sky_blue1")
example_folder = folder_names[-1]
example_function = list(self.folders[example_folder].folder.keys())[-1]
console.print("\nExample Usage", style="bold u gold1")
console.print(f"\nresponse = runner.{example_folder}.{example_function}()")
def show_variables(self):
"""Display all variables that this instance of Postman was instantiated with."""
print(), console.rule(f"[bold green]{self.title} instantiated with these Variables[/bold green]")
console.print(self.variables)
| UTF-8 | Python | false | false | 9,734 | py | 9 | openapi.py | 4 | 0.610438 | 0.607458 | 0 | 266 | 35.593985 | 119 |
kms08452/CoroNet | 6,425,271,092,689 | a6b6338891c091f01c7b34a971e82fa5f97af29a | 3363253c15a1c79f0dea133b7a0731f9d51f109d | /Corona_pmid/Corona_pmid.py | 90b295939069a8303207e773c213ff9ba0854db2 | []
| no_license | https://github.com/kms08452/CoroNet | d516f38f423b4276fb94c214ae6e649c5e689048 | 37cc036857ff31c1d0c927ffac8374af45a18e01 | refs/heads/master | 2021-05-20T00:43:16.489352 | 2020-07-27T07:24:13 | 2020-07-27T07:24:13 | 252,110,745 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import io
import json
import sys
import time
import random
def web_request(method_name, url, dict_data, is_urlencoded=True, timeout_seconds=3):
"""Web GET or POST request를 호출 후 그 결과를 dict형으로 반환 """
method_name = method_name.upper() # 메소드이름을 대문자로 바꾼다
if method_name not in ('GET', 'POST'):
raise Exception('method_name is GET or POST plz...')
    if method_name == 'GET':  # GET request
response = requests.get(url=url, params=dict_data, timeout=timeout_seconds)
    elif method_name == 'POST':  # POST request
if is_urlencoded is True:
response = requests.post(url=url, data=dict_data, \
timeout=timeout_seconds,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
else:
response = requests.post(url=url, data=json.dumps(dict_data), \
timeout=timeout_seconds, headers={'Content-Type': 'application/json'})
dict_meta = {'status_code': response.status_code, 'ok': response.ok, 'encoding': response.encoding,
'Content-Type': response.headers['Content-Type']}
    if 'json' in str(response.headers['Content-Type']):  # JSON response
return {**dict_meta, **response.json()}
    else:  # plain-text (non-JSON) response
return {**dict_meta, **{'text': response.text}}
def web_request_retry(num_retry=3, sleep_seconds=1, **kwargs):
"""timeout발생 시 sleep_seconds쉬고 num_retyrp번 재시도 한다"""
for n in range(num_retry):
try:
return web_request(**kwargs)
except requests.exceptions.Timeout:
print(str(n + 1) + ' Timeout')
time.sleep(sleep_seconds)
continue
return None
def SubmitPMIDList_BERN(Inputfile):
json = {}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
#
# load pmids
#
f_in = open(Inputfile,"r")
f2_out = open("error_log.txt", "w")
cnt = 1
f_out = open("COVID19_pubmed_result.txt","w")
while True:
line = f_in.readline().rstrip("\n")
if not line : break
if (cnt <= 20129):
cnt = cnt + 1
continue
time.sleep(random.randint(10, 30))
try:
r = requests.get("https://bern.korea.ac.kr/pubmed/" + line + "/pubtator",headers = headers, timeout = 60)
except requests.exceptions.Timeout:
print('Timeout Error : ' + line)
time.sleep(60)
try:
r = requests.get("https://bern.korea.ac.kr/pubmed/" + line + "/pubtator", headers=headers, timeout = 60)
except requests.exceptions.Timeout:
print('Timeout Error : ' + line)
time.sleep(60)
try:
r = requests.get("https://bern.korea.ac.kr/pubmed/" + line + "/pubtator", headers=headers, timeout=500)
except requests.exceptions.Timeout:
print('Timeout Error : ' + line)
f2_out.write('Timeout Error : ' + line + '\n')
f2_out.flush()
print(cnt)
cnt = cnt + 1
continue
if r.status_code != 200:
print("[Error]: HTTP code " + str(r.status_code))
f2_out.write("[Error]: HTTP code " + str(r.status_code) + ":" + line + '\n')
f2_out.flush()
else:
f_out.write(r.text)
f_out.write("!#$@!#$@!#$@!#$@\n")
f_out.flush()
print(cnt)
cnt = cnt+1
f_out.close()
f_in.close()
f2_out.close()
def SubmitPMIDList(Inputfile, Format, Bioconcept):
json = {}
#
# load pmids
#
with io.open(Inputfile, 'r', encoding="utf-8") as file_input:
json = {"pmids": [pmid.strip() for pmid in file_input.readlines()]}
f = open("Chemical_result.txt","w")
#
# load bioconcepts
#
if Bioconcept != "":
json["concepts"] = Bioconcept.split(",")
#
# request
#
r = requests.post("https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/" + Format, json=json)
if r.status_code != 200:
print("[Error]: HTTP code " + str(r.status_code))
else:
text = r.text.split("\n")
for i in text:
if("Chemical" in i ):
f.write(i)
f.write("\n")
f.close()
if __name__ == "__main__":
SubmitPMIDList_BERN("./COVID19_pubmed_list.txt")
#SubmitPMIDList("./pmid_list_0325.txt", "pubtator", "")
#
# arg_count = 0
# for arg in sys.argv:
# arg_count += 1
# if arg_count < 2 or (sys.argv[2] != "pubtator" and sys.argv[2] != "biocxml" and sys.argv[2] != "biocjson"):
# print("\npython SubmitPMIDList.py [InputFile] [Format] [BioConcept]\n\n")
# print("\t[Inputfile]: a file with a pmid list\n")
# print("\t[Format]: pubtator (PubTator), biocxml (BioC-XML), and biocjson (JSON-XML)\n")
# print(
# "\t[Bioconcept]: gene, disease, chemical, species, proteinmutation, dnamutation, snp, and cellline. Default includes all.\n")
# print("\t* All input are case sensitive.\n\n")
# print("Eg., python SubmitPMIDList.py examples/ex.pmid pubtator gene,disease\n\n")
# else:
# Inputfile = sys.argv[1]
# Format = sys.argv[2]
# Bioconcept = ""
# if arg_count >= 4:
# Bioconcept = sys.argv[3]
# #/home/yonsei/Documents/Corona19/ExampleCode.Python/examples/ex.pmid
# #SubmitPMIDList(Inputfile, Format, Bioconcept)
# | UTF-8 | Python | false | false | 5,825 | py | 26 | Corona_pmid.py | 22 | 0.547064 | 0.531464 | 0 | 155 | 35.812903 | 139 |
Ahmedabied/the-hole | 7,327,214,225,538 | e798b41fe5e4f107cbc82787dd5b72a4d6797bcc | 5f18a128cc4bcbc04e6157db73cdf5647fd1ebe4 | /uptohole.py | 5db6f17b45f8640caf3c942dffbda71ec7bd7caf | []
| no_license | https://github.com/Ahmedabied/the-hole | a09ceda87c752976189196f77cdf80e00a417c05 | 0f1dad0772de14718ef629e19546373af6294240 | refs/heads/master | 2021-01-02T22:52:36.566710 | 2017-08-05T08:26:23 | 2017-08-05T08:26:23 | 99,372,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ftplib,os,os.path,sys,zipfile,socket
host="host"
username="username"
password="password"
curdir=os.getcwd()
try:
ftp=ftplib.FTP(host,username,password)
print("[#] Connected to : %s"%(str(ftp.host)))
print("[#] Server current Dir : %s"%(ftp.pwd()))
print("[#] Current Device Dir : %s"%(curdir))
print("[#] Server IP : %s"%(socket.gethostbyname(ftp.host)))
print("[#] Current IP : %s"%(socket.gethostbyname(socket.gethostname())))
files=[i for i in ftp.nlst()]
except Exception as e:
print("[x] Cant Connect Right now!")
print("[X] %s"%(e))
exit(0)
def send(file):
fname=file.split("\\")[::-1][0]
if os.path.isdir(file.split(file.split("\\")[::-1][0])[0])==True:
os.chdir(file.split(file.split("\\")[::-1][0])[0])
if os.path.exists(fname)==True:
zipfile.ZipFile(str(fname.split(".")[0])+".zip",mode="w").write(fname)
ftp.storbinary("STOR "+str(str(fname.split(".")[0])+".zip"),open(str(str(fname.split(".")[0])+".zip"), 'rb'))
print("\n[#] Done Uploading\n");print(ftp.nlst())
os.chdir(curdir)
if int(len(sys.argv))==int(2):
send(sys.argv[1])
exit(0)
while True:
f1=os.path.normpath(input("\nEnter File Path To Upload -: "))
if os.path.exists(f1)==True:
send(f1)
else:
print("\n[X] Cant Find The File Speefied! ")
| UTF-8 | Python | false | false | 1,334 | py | 3 | uptohole.py | 2 | 0.593703 | 0.58021 | 0 | 44 | 28.181818 | 111 |
BinaryAlien/itemsetcopier | 317,827,627,187 | 04a7c5cd4c78b2bddc515501925295e326183240 | 81fac885a73cf49de99728a56afc2204527dade3 | /test.py | 6e88f02e0a5e3a3fd7cb95a884a2db567a214175 | [
"MIT"
]
| permissive | https://github.com/BinaryAlien/itemsetcopier | cbef9fcbbc9bbc319a6d83651f3d3cb5b383fabe | b34842b1470dd02a00b1f3a958605a61f7a65d00 | refs/heads/master | 2021-07-22T18:17:23.186203 | 2021-01-23T15:51:37 | 2021-01-23T15:51:37 | 239,313,812 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itemsetcopier import SET_NAME_MAX_LENGTH, Translator, ReturnCode, translate
import unittest
class MobafireTest(unittest.IsolatedAsyncioTestCase):
async def _test(self, set_name, url, build_index, expected_code):
res = await translate(Translator.MOBAFIRE, set_name=set_name, url=url, build_index=build_index)
self.assertEqual(res['code'], expected_code)
async def test_translator(self):
# Valid inputs
await self._test("Jax Top", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', None, ReturnCode.CODE_OK)
await self._test("Jax Top", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.CODE_OK)
await self._test("Jax Jgl", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 2, ReturnCode.CODE_OK)
await self._test("Jax Jgl", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', "2", ReturnCode.CODE_OK)
await self._test("Jax Jgl", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 99999, ReturnCode.CODE_OK)
await self._test("Jax Jgl", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', -99999, ReturnCode.CODE_OK)
await self._test("Jax Top", 'www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.CODE_OK)
# Invalid set name
await self._test("", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.ERR_SET_NAME_LENGTH)
await self._test(('t' * (SET_NAME_MAX_LENGTH + 1)), 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.ERR_SET_NAME_LENGTH)
# Invalid URL
await self._test("Jax Top", 'https://www.google.com', 1, ReturnCode.ERR_OTHER)
await self._test("Jax Top", 'https://https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.ERR_OTHER)
# Other
await self._test(123, 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', 1, ReturnCode.ERR_INVALID_PARAM)
await self._test(None, 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', None, ReturnCode.ERR_INVALID_PARAM)
await self._test("Jax Top", 123, 1, ReturnCode.ERR_INVALID_PARAM),
await self._test("Jax Top", None, 1, ReturnCode.ERR_INVALID_PARAM),
await self._test("Jax Jgl", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-503356', "abc", ReturnCode.ERR_INVALID_PARAM)
await self._test("Jax Top", 'https://www.mobafire.com/league-of-legends/build/10-13-ph45s-in-depth-guide-to-jax-the-grandmaster-000000', 1, ReturnCode.ERR_REMOTE_FAIL)
class MobalyticsTest(unittest.IsolatedAsyncioTestCase):
async def _test(self, champion_key, champion_name, role, expected_code):
res = await translate(Translator.MOBALYTICS, champion_key=champion_key, champion_name=champion_name, role=role)
self.assertEqual(res['code'], expected_code)
async def test_translator(self):
# Valid inputs
await self._test(None, 'Ahri', 'mid', ReturnCode.CODE_OK)
await self._test(103, None, 'mid', ReturnCode.CODE_OK)
await self._test(103, 'Doesnt matter here', 'mid', ReturnCode.CODE_OK)
await self._test(None, 'AhRi', 'mid', ReturnCode.CODE_OK)
await self._test(None, 'AhRi', 'MiD', ReturnCode.CODE_OK)
await self._test(103, None, 'MiD', ReturnCode.CODE_OK)
await self._test("103", None, 'mid', ReturnCode.CODE_OK)
await self._test("103", 'Doesnt matter here', 'mid', ReturnCode.CODE_OK)
await self._test("103", None, 'MiD', ReturnCode.CODE_OK)
# Invalid champion
await self._test(None, 'ttttt', 'mid', ReturnCode.ERR_INVALID_CHAMP)
await self._test(99999, None, 'mid', ReturnCode.ERR_INVALID_CHAMP)
await self._test(99999, 'Ahri', 'mid', ReturnCode.ERR_INVALID_CHAMP)
# Invalid role
await self._test(None, 'Ahri', 'ttttt', ReturnCode.ERR_INVALID_PARAM)
# Other
await self._test(None, None, 'mid', ReturnCode.ERR_INVALID_PARAM)
await self._test("abc", None, 'mid', ReturnCode.ERR_INVALID_PARAM)
await self._test(None, 123, 'mid', ReturnCode.ERR_INVALID_PARAM)
await self._test(None, 'Ahri', 123, ReturnCode.ERR_INVALID_PARAM)
await self._test(None, 'Ahri', None, ReturnCode.ERR_INVALID_PARAM)
class OpggTest(unittest.IsolatedAsyncioTestCase):
async def _test(self, set_name, champion_key, champion_name, role, expected_code):
res = await translate(Translator.OPGG, set_name=set_name, champion_key=champion_key, champion_name=champion_name, role=role)
self.assertEqual(res['code'], expected_code)
async def test_translator(self):
# Valid inputs
await self._test("Graves Jgl", None, 'Graves', 'jungle', ReturnCode.CODE_OK)
await self._test("Graves Jgl", 104, None, 'jungle', ReturnCode.CODE_OK)
await self._test("Graves Jgl", 104, 'Doesnt matter here', 'jungle', ReturnCode.CODE_OK)
await self._test("Graves Jgl", "104", None, 'jungle', ReturnCode.CODE_OK)
await self._test("Graves Jgl", "104", 'Doesnt matter here', 'jungle', ReturnCode.CODE_OK)
# Invalid set name
await self._test("", None, 'Graves', 'jungle', ReturnCode.ERR_SET_NAME_LENGTH)
await self._test(('t' * (SET_NAME_MAX_LENGTH + 1)), None, 'Graves', 'jungle', ReturnCode.ERR_SET_NAME_LENGTH)
# Invalid champion
await self._test("Graves Jgl", None, 'ttttt', 'jungle', ReturnCode.ERR_INVALID_CHAMP)
await self._test("Graves Jgl", 99999, None, 'jungle', ReturnCode.ERR_INVALID_CHAMP)
await self._test("Graves Jgl", 99999, 'Graves', 'jungle', ReturnCode.ERR_INVALID_CHAMP)
# Invalid role
await self._test("Graves Jgl", 'Graves', None, 'ttttt', ReturnCode.ERR_INVALID_PARAM)
# Other
await self._test(None, 'Graves', None, 'jungle', ReturnCode.ERR_INVALID_PARAM)
await self._test(123, 'Graves', None, 'jungle', ReturnCode.ERR_INVALID_PARAM)
await self._test("Graves Jgl", "abc", None, 'jungle', ReturnCode.ERR_INVALID_PARAM)
await self._test("Graves Jgl", None, 123, 'jungle', ReturnCode.ERR_INVALID_PARAM)
await self._test("Graves Jgl", None, None, 'jungle', ReturnCode.ERR_INVALID_PARAM)
await self._test("Graves Jgl", None, 'Graves', 123, ReturnCode.ERR_INVALID_PARAM)
await self._test("Graves Jgl", None, 'Graves', None, ReturnCode.ERR_INVALID_PARAM)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 6,644 | py | 4 | test.py | 2 | 0.724413 | 0.684828 | 0 | 102 | 64.137255 | 197 |
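Outside the unittest harness above, the same coroutine can be driven directly with asyncio. A small sketch follows; the champion and role values are taken from the tests, everything else is assumed.

```python
# Sketch: calling itemsetcopier's translate() outside the test suite.
import asyncio
from itemsetcopier import Translator, translate

async def main():
    res = await translate(
        Translator.MOBALYTICS,
        champion_key=None,
        champion_name="Ahri",
        role="mid",
    )
    print(res["code"])

asyncio.run(main())
```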
NageshRaykar/quality-analyzer | 2,774,548,906,603 | c1c4ef61ed301cccb90959d3917dc18a2faccce4 | a3d7db054656de34db5fb1e8d68b09c14456afdb | /rapidplugin/tests/resources/pypi/p1/p1.py | cb4483169e33c22a173407a9c95d8c8aa8dcf84b | [
"Apache-2.0"
]
| permissive | https://github.com/NageshRaykar/quality-analyzer | 27ba09fe5a3a6296be14072f547282eae9959633 | f8861865db0683ad37cc42ae858e9fcc004e6b17 | refs/heads/master | 2023-06-23T04:05:29.562209 | 2021-07-22T15:12:21 | 2021-07-22T15:12:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def c():
return
| UTF-8 | Python | false | false | 20 | py | 27 | p1.py | 17 | 0.5 | 0.5 | 0 | 2 | 9 | 10 |
apalade/restaurants | 11,879,879,553,455 | 0f88172e32606e4bd24846c401af1f6d5d6aa29f | 13feefb4f28fcbb2532b33614b217f0c00aab4f2 | /backend/resources/restaurant.py | d7a179c8c5156d96096c52d7250d16bd6e2c0882 | []
| no_license | https://github.com/apalade/restaurants | ab94d89b8590195a42345fa3486cfea624733d20 | c440aa2f1c8b220c5e664651b2045d2cfcf7ed5c | refs/heads/main | 2023-01-03T07:51:51.189634 | 2020-10-29T09:40:59 | 2020-10-29T09:40:59 | 308,280,853 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Optional, List
from fastapi import HTTPException
from fastapi_utils.cbv import cbv
from fastapi_utils.inferring_router import InferringRouter
from sqlalchemy import orm
from resources._base import BaseLoggedInResource
import schemas
import models
router = InferringRouter()
@cbv(router)
class RestaurantResource(BaseLoggedInResource):
@router.get("/restaurants", response_model=List[schemas.Restaurant])
async def get(self, owner_id: Optional[int] = None):
restaurants = self.db.query(models.Restaurant).\
filter(models.Restaurant.deleted.is_(False))
if owner_id is not None:
restaurants = restaurants.filter(
models.Restaurant.owner_id == owner_id)
return restaurants.all()
@router.post("/restaurant", response_model=schemas.Restaurant)
async def create(self, data: schemas.RestaurantCreate):
await self.verify_is_owner()
params = data.dict()
params['owner_id'] = self.user.id
restaurant = models.Restaurant(**params)
self.db.add(restaurant)
self.db.commit()
self.db.refresh(restaurant)
return restaurant
@router.put("/restaurant", response_model=schemas.Restaurant)
async def update(self, data: schemas.RestaurantUpdate):
await self.verify_is_owner()
restaurant = self._get_one(data.id)
if restaurant.owner_id != self.user.id:
raise HTTPException(status_code=403,
detail="You do not have access to"
"edit this restaurant.")
restaurant.name = data.name
restaurant.description = data.description
self.db.commit()
self.db.refresh(restaurant)
return restaurant
@router.delete("/restaurant")
async def delete(self, rid: int):
await self.verify_is_owner()
restaurant = self._get_one(rid)
if restaurant.owner_id != self.user.id:
raise HTTPException(status_code=403,
detail="You do not have access"
" to delete this restaurant.")
restaurant.deleted = True
#self.db.delete(restaurant)
self.db.commit()
def _get_one(self, id: int):
try:
restaurant = self.db.query(models.Restaurant).filter(
models.Restaurant.id == id).one()
except orm.exc.MultipleResultsFound:
# This really shouldn't happen because of the unique constrain
raise HTTPException(status_code=403,
detail="Multiple restaurant IDs "
"with same value.")
except orm.exc.NoResultFound:
raise HTTPException(status_code=403,
detail="Invalid restaurant ID.")
else:
return restaurant
| UTF-8 | Python | false | false | 2,908 | py | 39 | restaurant.py | 25 | 0.607978 | 0.603851 | 0 | 84 | 33.619048 | 74 |
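The resource above only defines a router; here is a minimal sketch of how such a router is typically mounted into an application. The module path and URL prefix are assumptions, not taken from the repository.

```python
# Sketch: wiring the router above into a FastAPI application.
from fastapi import FastAPI
from resources.restaurant import router as restaurant_router

app = FastAPI()
app.include_router(restaurant_router, prefix="/api")
# e.g. GET /api/restaurants?owner_id=1 now dispatches to RestaurantResource.get
```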
Jaskaran23/Programming-for-big-data-1-cmpt-732 | 3,135,326,140,027 | 49329e25fa7afeb4569ee6ed588bd5054d4a3157 | 32fc071128fd7fd104a23fa60fbca0acf003faeb | /Assignment4/relative_score_bcast.py | 050bd09504e1f350c94ffde3c537f6f2cafc1c3c | []
| no_license | https://github.com/Jaskaran23/Programming-for-big-data-1-cmpt-732 | 2b7f4a2741e3e95a5f46b0d772568e1bea2b8732 | ac4446b43af669070be2aadc93ce5476cfcfe2a9 | refs/heads/master | 2020-04-21T03:23:29.085651 | 2019-02-05T18:01:24 | 2019-02-05T18:01:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyspark import SparkConf, SparkContext
import sys
import json
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
def key_valuepair(dictvalue):
reddit_key=dictvalue.get("subreddit")
score_key=dictvalue.get("score")
yield (reddit_key,(1,score_key))
def adding_pairs(i,j):
sumone=0
scoresum=0
sumone = i[0]+j[0]
scoresum = i[1]+j[1]
return (sumone,scoresum)
def dividing_pairs(k):
average = 0.0
average = (k[1][1]/k[1][0])
return(k[0],average)
def broadcast_func(broadcast_obj1,y):
subreddit_val=y[0]
return(y[1]['score'] / broadcast_obj1.value[y[0]],y[1]['author'])
def main(inputs, output):
text=sc.textFile(inputs).map(json.loads).cache()
pairs=text.flatMap(key_valuepair)
add_pair=pairs.reduceByKey(adding_pairs)
avg_pair=add_pair.map(dividing_pairs).filter(lambda x: (x[1]>0))
avg_obj=dict(avg_pair.collect())
broadcast_obj=sc.broadcast(avg_obj)
commentbysub = text.map(lambda c: (c['subreddit'],c))
result=commentbysub.map(lambda x: broadcast_func(broadcast_obj,x)).sortBy(lambda z: z,False)
print(result.take(10))
#result=joined_rdd.map(lambda z: (z[0],z[1][0][0]/z[1][1],z[1][0][1])).sortBy(False)
outdata=result.map(json.dumps)
outdata.saveAsTextFile(output)
if __name__ == '__main__':
conf = SparkConf().setAppName('reddit average')
sc = SparkContext(conf=conf)
assert sc.version >= '2.3' # make sure we have Spark 2.3+
inputs = sys.argv[1]
output = sys.argv[2]
main(inputs, output)
| UTF-8 | Python | false | false | 1,482 | py | 24 | relative_score_bcast.py | 24 | 0.682186 | 0.653171 | 0 | 59 | 24.118644 | 93 |
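The script above broadcasts per-subreddit averages instead of joining two RDDs. A self-contained sketch of that pattern on toy data, independent of the reddit input format:

```python
# Sketch: the broadcast-average pattern from the script above, on toy data.
from pyspark import SparkConf, SparkContext

sc = SparkContext(conf=SparkConf().setAppName('broadcast sketch'))
comments = sc.parallelize([
    {'subreddit': 'a', 'author': 'x', 'score': 4},
    {'subreddit': 'a', 'author': 'y', 'score': 8},
    {'subreddit': 'b', 'author': 'z', 'score': 10},
])
# Per-subreddit averages, collected to the driver and broadcast to executors.
averages = dict(
    comments.map(lambda c: (c['subreddit'], (1, c['score'])))
            .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1]))
            .mapValues(lambda t: t[1] / t[0])
            .collect()
)
bcast = sc.broadcast(averages)
relative = comments.map(lambda c: (c['score'] / bcast.value[c['subreddit']], c['author']))
print(relative.sortBy(lambda p: p, False).collect())
```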
abos5/pythontutor | 9,603,546,882,253 | f11946ce30e0f8bd0260ca563b615e9e6326774f | 7bc5f02dab36eb41e4245059b0830830f828b0fe | /dive/regression.py | 4253cf075c4430e75e69f5d21bee894ed04e50bb | [
"MIT"
]
| permissive | https://github.com/abos5/pythontutor | 3380686cec0ce6a7000fdb0ad424227e8a489684 | eba451700def8bd98d74668d1b6cc08c0ccc0d3c | refs/heads/master | 2020-04-10T18:05:52.438924 | 2018-12-10T15:16:37 | 2018-12-10T15:16:37 | 70,141,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Regression testing framework
This module will search for scripts in the same directory named
XYZtest.py. Each such script should be a test suite that tests a
module through PyUnit.
"""
import sys
import os
import re
import unittest
from toolbox import exit
def regressionTest():
path = os.path.abspath(os.path.dirname(sys.argv[0]))
# obtain the required files
files = os.listdir(path)
test = re.compile('test\.py$', re.IGNORECASE)
files = filter(test.search, files)
# get rid of the extension
filenameToModuleName = lambda f: os.path.splitext(f)[0]
moduleNames = map(filenameToModuleName, files)
modules = map(__import__, moduleNames)
load = unittest.defaultTestLoader.loadTestsFromModule
return unittest.TestSuite(map(load, modules))
exit()
if __name__ == "__main__":
unittest.main(defaultTest="regressionTest")
# end of file
| UTF-8 | Python | false | false | 894 | py | 124 | regression.py | 88 | 0.709172 | 0.706935 | 0 | 35 | 24.542857 | 65 |
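A sketch of the kind of module the framework above would discover. The file name (toolboxtest.py) and the assertion are illustrative only; any module whose name ends in test.py matches the docstring's rule.

```python
# Sketch: a minimal "XYZtest.py" module that regression.py would pick up.
import unittest
import toolbox

class ToolboxTest(unittest.TestCase):
    def test_exit_is_callable(self):
        self.assertTrue(callable(toolbox.exit))

if __name__ == "__main__":
    unittest.main()
```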
majhar-nayem/Software-Engineering-Project | 14,035,953,162,587 | e73e2a8eaefcc3cb9e43e2a878af41afe8385fa5 | 7992752f110aceaa47fa1951c333a3cbb6209684 | /smartmenu/food/urls.py | 051965ae13806efd6365cf8f56cba25004cf8c0a | [
"MIT"
]
| permissive | https://github.com/majhar-nayem/Software-Engineering-Project | 42f48a29e44e2a3c2b94f11786cfddaaff812aea | e768cc738054d4df3453ee2d42ae03c4d6948152 | refs/heads/master | 2020-04-19T19:12:46.562875 | 2019-06-23T15:31:40 | 2019-06-23T15:31:40 | 168,382,824 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path, include
from . import views
from .views import FoodListView, FoodDetailsView, FoodCategoryView
# api url handler
urlpatterns = [
path('category/', FoodCategoryView.as_view(), name='food-category'),
path('foodlist/', FoodListView.as_view(), name='food-list'),
path('foodlist/<int:pk>/', FoodDetailsView.as_view(), name='food-details'),
path('catlist/<int:pk>/', views.CatList, name='catlist'),
]
| UTF-8 | Python | false | false | 455 | py | 20 | urls.py | 14 | 0.685714 | 0.685714 | 0 | 12 | 35.916667 | 79 |
alehlipka/TarotMG-server | 6,012,954,263,639 | daae6fec76d9cbb54f25f157326db290c84e7d0d | cab10378580423bd2f76407b773dc2a007eb16b6 | /settings.py | 4224924271cb9a9df6c8e6765a9fc21bee96c2eb | []
| no_license | https://github.com/alehlipka/TarotMG-server | 4248f1a5464e566a0fa8132a3f49f6f22fafdddb | e9d5ba4c5b6cedaa8500ebfc5ad1eb225108fe73 | refs/heads/master | 2020-06-20T20:00:11.353538 | 2019-08-07T18:24:39 | 2019-08-07T18:24:39 | 197,230,396 | 0 | 1 | null | false | 2019-08-07T23:19:44 | 2019-07-16T16:32:11 | 2019-08-07T18:25:13 | 2019-08-07T18:25:12 | 58 | 0 | 1 | 1 | Python | false | false | """File for settings"""
# for shuffle
NUMBER_OF_SHUFFLE = 15
NUMBER_OF_CARDS = 78
# for Bot
token="" | UTF-8 | Python | false | false | 105 | py | 13 | settings.py | 10 | 0.647619 | 0.609524 | 0 | 8 | 12.25 | 24 |
Catory/mumu_fruits | 6,227,702,588,095 | 2d328309ca6a68eb6ffc69a72ff7aacbcb604071 | b56017aa4f7a40671835688ef7abcc276e0b902b | /appOne/migrations/0008_order.py | 9301f2f8d054e9f4a735017eaf997d53b93a8fb1 | []
| no_license | https://github.com/Catory/mumu_fruits | f51ab10f5353eb578293ee522393bc5067526404 | 86439ec3e2571f429aa4a0eeb85702cc65e8e4d1 | refs/heads/master | 2021-05-11T01:39:00.443031 | 2017-12-23T10:02:59 | 2017-12-23T10:02:59 | 118,335,192 | 0 | 0 | null | true | 2018-01-21T13:01:37 | 2018-01-21T13:01:37 | 2017-12-23T10:03:49 | 2017-12-23T10:03:48 | 547 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 06:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('appOne', '0007_user'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('orderInfo', models.CharField(max_length=1000)),
('orderUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='appOne.User')),
],
options={
'db_table': 'axf_order',
},
),
]
| UTF-8 | Python | false | false | 793 | py | 30 | 0008_order.py | 13 | 0.566204 | 0.534678 | 0 | 27 | 28.37037 | 114 |
samarthvaru/FarmUp1 | 6,983,616,834,014 | 8e6be4e7c04bb1cf2eaf7ba1a4f290e4a09bff45 | 6b899d257d40365dc3ce09f6a286378432bd152e | /Backend/backend/api/product/serializers.py | 9e5a35fb3ade1869dd7e9965643953adb918105d | []
| no_license | https://github.com/samarthvaru/FarmUp1 | e0b663bcd0066dd183280cf4e4f670d16183488d | 2d0fb358317339b754c55fb431035137b11959a7 | refs/heads/master | 2023-08-10T23:39:33.804508 | 2021-09-20T22:04:09 | 2021-09-20T22:04:09 | 408,607,002 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from api.customerProfile.serializers import FarmerSerializer
from products.models import Product,Tag
from api.customerProfile.serializers import FarmerSerializer
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ['title', 'slug']
class ProductSerializer(serializers.HyperlinkedModelSerializer):
tag_list = TagSerializer(many=True, read_only=True)
sold_by= FarmerSerializer(read_only=True)
class Meta:
model = Product
fields = ['id', 'image', 'title', 'slug',
'featured', 'description', 'original_price', 'price', 'tag_list','sold_by','timestamp']
ordering=['timestamp']
# class ProductListSerializer(serializers.ModelSerializer):
# class Meta:
# model = Product
# fields = [ 'name', 'description',
# 'sold_by']
# class ProductDetailSerializer(serializers.ModelSerializer):
# class Meta:
# model = Product
# fields = ['name', 'description', 'price',
# 'sold_by', 'date',] | UTF-8 | Python | false | false | 1,105 | py | 33 | serializers.py | 31 | 0.648869 | 0.648869 | 0 | 39 | 27.358974 | 105 |
DamonZCR/PythonStu | 11,012,296,152,663 | 5dff900c193dd605ba5f2de56ae29e9d00b61916 | caa72788fdae6b05c5ce4c132b45fc00d55bb607 | /47Tkinter/Canvas画布/18-4Canvas画布画图形.py | 15351acebbe878bea337f0e94b0e4238cf4d307b | []
| no_license | https://github.com/DamonZCR/PythonStu | dcc2ba49195f5859fd63227fe0f8f78b36ed46df | 88fec97e3bccff47ba1c5f521f53a69af6ca2b2d | refs/heads/master | 2023-07-05T06:29:53.300920 | 2021-08-13T12:22:30 | 2021-08-13T12:22:30 | 302,256,563 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
'''Draw an ellipse inside a dashed rectangle'''
root = Tk()
w = Canvas(root, width=200, height=100)
w.pack()
# the first two arguments are the start point, the last two the end point; dash sets the line style (dashed)
w.create_rectangle(40, 20, 160, 80, dash=(4, 4))
w.create_oval(40, 20, 160, 80, fill='pink')
w.create_text(100, 50, text='Damon')
w2 = Canvas(root, width=200, height=100)
w2.pack()
w2.create_rectangle(40, 20, 160, 80, dash=(4, 4))
w2.create_oval(70, 20, 130, 80, fill='pink')
w2.create_text(100, 50, text='Damon')
mainloop() | UTF-8 | Python | false | false | 522 | py | 237 | 18-4Canvas画布画图形.py | 230 | 0.667401 | 0.519824 | 0 | 16 | 27.4375 | 49 |
bsmi021/eahub_shopco | 1,494,648,643,497 | 3a1f75459e8eb5cbb45b29d5e415a53af37998d2 | 31c2ef1b6e999ebc0ca6b61ca03e20cc7701e1ff | /simulator/simulator.py | bd4144468340946747b63840530ddd194213e232 | []
| no_license | https://github.com/bsmi021/eahub_shopco | a1c8e71e40bc507f2ded22fa2b56fbdfb3b17184 | 3f4c93c631e5d5b52acd2ce11e220ff3fbec07b6 | refs/heads/master | 2022-12-09T01:22:21.446915 | 2019-06-11T17:40:42 | 2019-06-11T17:40:42 | 175,206,709 | 0 | 0 | null | false | 2022-12-08T01:41:44 | 2019-03-12T12:29:56 | 2019-06-11T17:40:57 | 2022-12-08T01:41:44 | 16,596 | 0 | 0 | 13 | Python | false | false | import requests
from py_linq import Enumerable
import random
import json
from werkzeug.security import generate_password_hash
from faker import Faker
from faker.providers import profile, person, address, phone_number, credit_card
from uszipcode import SearchEngine
import namesgenerator # using this for brands and products
fake = Faker()
fake.add_provider(profile)
fake.add_provider(person)
fake.add_provider(address)
fake.add_provider(phone_number)
fake.add_provider(credit_card)
zip_search = SearchEngine(simple_zipcode=True)
def get_random_city_zip():
state_abbr = fake.state_abbr(include_territories=False)
#print (state_abbr)
state_abbr = 'NY'
#zip_code = fake.zipcode_in_state(state_abbr=state_abbr)
#f_zip_code = zip_search.by_zipcode(zip_code)
zip_code = random.randint(10000,99999)
f_zip_code = zip_search.by_zipcode(zip_code)
while f_zip_code.zipcode is None:
zip_code = random.randint(10000,99999)
f_zip_code = zip_search.by_zipcode(zip_code)
print (f_zip_code)
return f_zip_code
uri = "http://10.104.87.133:5000/api"
brand_uri = "{}/brands".format(uri)
products_uri = "{}/products".format(uri)
users_uri = "{}/user".format(uri)
basket_uri = "{}/basket".format(uri)
customers_uri = "{}/customers".format(uri)
register_uri = f"{customers_uri}/register"
sites_uri = f'{uri}/warehouse/sites'
brand_count = 13
product_count = 555
customer_count = 45000
site_count = 12
order_count = 50000
print('Checking on customers, creating if necessary')
customers = requests.get(customers_uri)
if customers.json() is None or len(customers.json()) < customer_count:
max_range = customer_count - len(customers.json())
print(f'Creating {max_range} Customers')
for i in range(0, max_range):
profile = fake.profile()
person = {
'first_name': str.split(profile['name'])[0],
'last_name': str.split(profile['name'])[1]
}
#
f_zip_code = get_random_city_zip()
address = {
'street_1': fake.street_address(),
'street_2': "",
'city': f_zip_code.major_city,
'state': f_zip_code.state,
'country': 'US',
'zip_code': f_zip_code.zipcode
}
phone = fake.phone_number()
#
account = {
'user_name': profile['username'],
'password_hash': generate_password_hash(profile['username']),
'email': profile['mail'],
'name': person['first_name'],
'last_name': person['last_name'],
'street_1': address['street_1'],
'street_2': address['street_2'],
'city': address['city'],
'state': address['state'],
'zip_code': address['zip_code'],
'country': address['country'],
'phone': phone
}
# request_data = json.dumps(account)
requests.post(register_uri, json=account)
print("Checking on sites, creating if necessary")
sites = requests.get(sites_uri)
if sites.json() is None or len(sites.json()) < site_count:
if len(sites.json()) == 0:
max_range = site_count
else:
max_range = site_count - len(sites.json())
for i in range(0, max_range):
zip_info = get_random_city_zip()
site = {'name': zip_info.post_office_city, 'zip_code': zip_info.zipcode, 'type_id': 1}
requests.post(sites_uri, json=site)
print("Checking on brands, creating if necessary")
brands = requests.get(brand_uri)
if brands.json() is None or len(brands.json()) < brand_count:
if brands.json() is None:
max_range = brand_count
else:
max_range = brand_count - len(brands.json())
print(f'Creating {max_range} brands.')
for i in range(0, max_range):
brand = {'name': namesgenerator.get_random_name()}
request_data = json.dumps(brand)
response = requests.post(brand_uri, json=brand)
brands = requests.get(brand_uri)
brands = brands.json()
print('Checking on products, creating if necessary')
products = requests.get(products_uri)
if products.json() is None or len(products.json()) < 300:
colors = ['white', 'black', 'red', 'blue', 'yellow', 'titanium', 'steel-grey',
'grey', 'green', 'light-blue', 'pink', 'orange']
types = ['kitchen', 'outdoor', 'indoor', 'bedroom', 'living room']
if products.json() is None or len(products.json()) == 0:
max_product_id = 0
max_range = 300
else:
products = Enumerable(products.json())
max_product_id = products.max(lambda x: x['_id']) + 1
max_range = 300 - len(products.json())
print(f'Creating {max_range} products.')
for i in range(max_product_id, max_product_id + max_range):
brand_id = random.choice(brands)['_id']
product_name = namesgenerator.get_random_name(' ')
sku = f"WR-{f'00000000{i + 1}'[-8:]}"
max_stock_threshold = random.randint(105, 1050)
restock_threshold = random.randint(10, 50)
available_stock = random.randint(restock_threshold + 5, max_stock_threshold)
product = {
"name": product_name,
"description": namesgenerator.get_random_name(),
"price": float(random.randint(1, 255)) + .99,
"product_brand_id": brand_id,
"product_type_id": 1,
"discontinued": False,
"max_stock_threshold": max_stock_threshold,
"sku": sku,
"attributes": {
"weight": random.randint(10, 100),
"height": random.randint(1, 100),
"width": random.randint(1, 100),
"depth": random.randint(1, 100),
"color": random.choice(colors),
"type": random.choice(types)
}
}
request_data = json.dumps(product)
response = requests.post(products_uri, json=request_data)
customers = requests.get(customers_uri)
customers = customers.json()
card_types = [{'id': 1, 'value': 'discover'},
{'id': 2, 'value': 'visa16'},
{'id': 3, 'value': 'amex'},
{'id': 4, 'value': 'mastercard'}]
for customer in customers:
provider = random.choice(card_types)
customer['card_type_id'] = provider['id']
customer['card_provider'] = provider['value']
customer['expiration'] = fake.credit_card_expire(start='now', end="+4y", date_format='%m/%y')
customer['card_number'] = fake.credit_card_number(card_type=customer['card_provider'])
customer['security_number'] = fake.credit_card_security_code(card_type=customer['card_provider'])
products = requests.get(products_uri)
products = products.json()
print(f'Creating {order_count} orders now, this may take a while...')
for i in range(0, order_count):
basket_item_ct = random.randint(1, 6)
customer = random.choice(customers)
items = []
for x in range(1, basket_item_ct):
product = random.choice(products)
while product in items:
product = random.choice(products)
items.append(product)
quantity_gen = lambda p: random.randint(1, 5) if p < 20.99 else 1
basket = {
'buyer_id': customer['_id'],
'items': [
{
'product_id': product['_id'],
'product_name': product['name'],
'unit_price': product['price'],
'old_unit_price': product['price'],
'quantity': quantity_gen(product['price'])
} for product in items]
}
response_data = requests.post(basket_uri, json=basket)
if response_data.status_code == 201:
checkout_basket = {
'buyer_id': str(customer['_id']),
'buyer': customer['full_name'],
'city': customer['city'],
'street_1': customer['street_1'],
'street_2': customer['street_2'],
'state': customer['state'],
'country': customer['country'],
'zip_code': customer['zip_code'],
'card_number': customer['card_number'],
'cardholder_name': customer['full_name'],
'expiration': customer['expiration'],
'security_number': customer['security_number'],
'card_type_id': int(customer['card_type_id'])
}
resp = requests.put(f'{basket_uri}/checkout', json=checkout_basket)
if resp.status_code != 204:
print(resp)
else:
print(f'Order sent for Customer {customer["_id"]}: {customer["full_name"]}')
#
# products = requests.get(products_uri)#.json()
# #users = requests.get(users_uri)
# products = products.json()
#
# for product in products:
# print(product['_id'])
#
# buyer_id = 1
# buyer_name = 'Gordon Ramsay'
# address = {
# 'street_1':'1313 Mockingbird Ln',
# 'street_2':None,
# 'city':"Anytown",
# 'state':'NY',
# 'zip_code':'10010',
# 'country':'US'
# }
# email = 'gordon@hellskitchen.com'
# phone = '18005556666'
# card_number='4525242422'
# cardholder_name='Gordon Ramsay'
# security_number='442'
# expiration='01/99'
# card_type_id=1
#
# basket = {
# 'buyer_id':buyer_id,
# 'items':[
# {
# 'product_id':product['_id'],
# 'product_name':product['name'],
# 'unit_price':product['price'],
# 'old_unit_price':product['price'],
# 'quantity': random.randint(1,10)
# } for product in products]
# }
#
# basket = json.dumps(basket)
#
# print(basket)
# resp = requests.post(basket_uri, json=basket)
#
# if resp.status_code == 201:
# checkout_basket={
# 'buyer_id':str(buyer_id),
# 'buyer':buyer_name,
# 'city':address['city'],
# 'street_1':address['street_1'],
# 'street_2':'',
# 'state':address['state'],
# 'country':address['country'],
# 'zip_code':address['zip_code'],
# 'card_number':card_number,
# 'cardholder_name':cardholder_name,
# 'expiration':expiration,
# 'security_number':security_number,
# 'card_type_id':int(card_type_id)
# }
#
# checkout_basket = json.dumps(checkout_basket)
# print (checkout_basket)
#
# resp = requests.put(f'{basket_uri}/checkout', json=checkout_basket)
#
# print(resp.status_code)
#
# #print(products)
| UTF-8 | Python | false | false | 10,315 | py | 60 | simulator.py | 33 | 0.585167 | 0.565778 | 0 | 334 | 29.883234 | 101 |
srajsonu/LeetCode-Solutions-Python | 5,506,148,107,909 | 878912ef1f22329f656c2f1adf5a80e5d3cf0d5c | e8199f1d424592affe19b50fd96a02815067d1b1 | /Stacks/316. Remove Duplicate Letters.py | 263e3329f37f52b66ca25d8db7df936ff29448e8 | []
| no_license | https://github.com/srajsonu/LeetCode-Solutions-Python | 39a809e4c6d555a3a3055ce03d59cfa40b93a287 | 8ec31c8df2885f3da533424ba13060b7d3e3af78 | refs/heads/master | 2023-03-19T10:05:42.578615 | 2021-03-13T17:21:36 | 2021-03-13T17:21:36 | 280,716,200 | 0 | 1 | null | false | 2020-10-06T09:54:02 | 2020-07-18T18:32:04 | 2020-10-02T14:54:51 | 2020-10-02T14:54:48 | 116 | 0 | 1 | 1 | Python | false | false | class Solution:
def solve(self, A):
freq = {}
for i in A:
if i not in freq:
freq[i] = 1
else:
freq[i] += 1
ans = []
vis = set()
for i in A:
freq[i] -= 1
if i in vis:
continue
while ans and ans[-1] > i and freq[ans[-1]] > 0:
vis.remove(ans.pop())
ans.append(i)
vis.add(i)
return ''.join(ans)
if __name__ == '__main__':
A = "cbacdcbc"
B = Solution()
print(B.solve(A))
| UTF-8 | Python | false | false | 584 | py | 304 | 316. Remove Duplicate Letters.py | 303 | 0.369863 | 0.359589 | 0 | 29 | 19.137931 | 60 |
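The solution above is the usual greedy, monotonic-stack approach to this problem. A quick self-check against the problem's standard examples; the expected outputs are the well-known answers for LeetCode 316, and the snippet assumes the Solution class above is in scope.

```python
# Sketch: sanity checks for the Solution class above.
s = Solution()
assert s.solve("bcabc") == "abc"
assert s.solve("cbacdcbc") == "acdb"
```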
ddtkra/atcoder | 3,779,571,225,556 | 9d562ad42ef5ae47c00e3f8fa5669ba799dbe00a | 0bb474290e13814c2498c086780da5096453da05 | /abc129/D/main.py.1 | 0399bbd378d78ca41a5376c955960f39962963f3 | []
| no_license | https://github.com/ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | false | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | 2020-03-18T09:22:19 | 2022-01-21T20:10:20 | 1,657 | 0 | 0 | 2 | Python | false | false | #!/usr/bin/env python3
import sys
import re
def solve(H: int, W: int, S: "List[str]"):
L = [[0] * W for i in range(H)]
R = [[0] * W for i in range(H)]
U = [[0] * W for i in range(H)]
D = [[0] * W for i in range(H)]
for i in range(H):
for j in range(W):
            # left
if(S[i][j] =='#'):
L[i][j] = 0
elif(j == 0):
L[i][j] = 1
elif(j >= 1):
L[i][j] = L[i][j-1] + 1
            # up
if(S[i][j] =='#'):
U[i][j] = 0
elif(i == 0):
U[i][j] = 1
else:
U[i][j] = U[i-1][j] + 1
for i in range(H-1,-1,-1):
for j in range(W-1,-1,-1):
            # right
if(S[i][j] =='#'):
R[i][j] = 0
elif(j == W-1):
R[i][j] = 1
else:
R[i][j] = R[i][j+1] + 1
            # down
if(S[i][j] =='#'):
D[i][j] = 0
elif(i == H-1):
D[i][j] = 1
else:
D[i][j] = D[i+1][j] + 1
ans = 0
for i in range(H):
for j in range(W):
ans = max(ans, L[i][j] + R[i][j] + U[i][j] + D[i][j] - 3)
print(ans)
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
H = int(next(tokens)) # type: int
W = int(next(tokens)) # type: int
S = [ next(tokens) for _ in range(H) ] # type: "List[str]"
solve(H, W, S)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,830 | 1 | 395 | main.py.1 | 217 | 0.372667 | 0.35236 | 0 | 75 | 23.293333 | 165 |
kingdion/filmstarter-server | 6,932,077,217,421 | fad19d9b50ec0068744eca973d9a1c214d90660b | 1e29104d1ed925af50c0e2f8867d8c48d7e9d8e0 | /create_script.py | 09da1f5f9f4d8db49f211c3e04b2900beb58d967 | []
| no_license | https://github.com/kingdion/filmstarter-server | 31f1bdea29d863104d2268e9d474ac7d5577f466 | 5f9cf86e58ca69f7b8f94b1f7b311f273a35d228 | refs/heads/master | 2020-05-27T14:28:18.187840 | 2019-06-03T04:30:44 | 2019-06-03T04:30:44 | 188,659,609 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
import string
import random
from app import create_app
from app.models import *
from werkzeug.security import generate_password_hash, check_password_hash
import uuid
app = create_app()
with app.app_context():
db.drop_all()
db.create_all()
user1 = Account("John", "Doe", "TestUser01", "testuser01@gmail.com", generate_password_hash("TestUser01", method='sha256'))
user2 = Account("Jane", "Doe", "TestUser02", "testuser02@gmail.com", generate_password_hash("TestUser02", method='sha256'))
user3 = Account("Writer", "Doe", "TestUser03", "testuser03@gmail.com", generate_password_hash("TestUser03", method='sha256'))
user4 = Account("Cinematographer", "Doe", "TestUser04", "testuser04@gmail.com", generate_password_hash("TestUser04", method='sha256'))
project1 = Project("Harry Potter", "Warner Brothers", "A movie about wizards", ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)))
project2 = Project("Star Wars", "Lucasfilms", "A movie about wizards with swords", ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)))
project3 = Project("The Avengers", "Marvel", "A movie about heroes n stuff", ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)))
project4 = Project("The Avengers: Endgame", "Marvel", "A movie about heroes n stuff and thanos", ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)))
db.session.add(user1)
db.session.add(user2)
db.session.add(user3)
db.session.add(user4)
db.session.add(project1)
db.session.add(project2)
db.session.add(project3)
db.session.add(project4)
db.session.commit()
project_link = ProjectLink(user1.id, project1.id, "Director", True)
project_link2 = ProjectLink(user2.id, project1.id, "Actor", True)
project_link3 = ProjectLink(user3.id, project1.id, "Writer", True)
project_link4 = ProjectLink(user4.id, project1.id, "Cinematographer", True)
project_link5 = ProjectLink(user1.id, project2.id, "Writer", False)
project_link6 = ProjectLink(user2.id, project2.id, "Director", False)
project_link7 = ProjectLink(user1.id, project3.id, "Cinematographer", False)
project_link8 = ProjectLink(user1.id, project4.id, "Director", False)
db.session.add(project_link)
db.session.add(project_link2)
db.session.add(project_link3)
db.session.add(project_link4)
db.session.add(project_link5)
db.session.add(project_link6)
db.session.add(project_link7)
db.session.add(project_link8)
db.session.commit() | UTF-8 | Python | false | false | 2,704 | py | 6 | create_script.py | 6 | 0.670858 | 0.639053 | 0 | 53 | 50.037736 | 174 |
KaprianZ/IFPI | 9,191,230,032,283 | 3930d61be8d24102f4fb09576b7511121fec430c | 396886c96fe16f6bee2cfba4466d0a4d3df78044 | /Python/Atividades - DOT/Ativ. Sem. 01/Peso Ideal.py | 890d572351826012210474135396a77dc7424f94 | [
"MIT"
]
| permissive | https://github.com/KaprianZ/IFPI | 198db256b62b0764ab33d0aae4fc56158fa1f40c | 54ffea357073f03f70010670a88382bf06a0af1f | refs/heads/main | 2023-07-15T20:08:43.453771 | 2021-08-27T16:40:59 | 2021-08-27T16:40:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def peso_ideal(a, s):
if s == 1:
return (62.1 * a) - 44.7
if s == 2:
return (72.7 * a) - 58
def main():
h = float(input("Digite a sua altura: "))
s = int(input("Você é homen ou mulher? 1 - Mulher 2 - Homem "))
c = peso_ideal(h, s)
print(f"Seu peso ideal é {c} kg.")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 357 | py | 21 | Peso Ideal.py | 12 | 0.477401 | 0.435028 | 0 | 18 | 18.666667 | 67 |
drnodev/CSEPC110 | 10,084,583,229,656 | 05b40eb220e7302a73197723786bfb86d51f0bba | 61e3974b8862606ef69953ed3cc745237de35e7b | /prove01_colors.py | c0d56146bc2ac797256402f2f3556883b62ac507 | []
| no_license | https://github.com/drnodev/CSEPC110 | 23a7a1113a12fdb9af6bef94bff3858105073cd5 | bc575262ac2f56864ebe6c4427433affc90efbb9 | refs/heads/master | 2023-05-26T03:16:16.731471 | 2021-06-08T03:02:20 | 2021-06-08T03:02:20 | 367,801,860 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
File: prove01_colors.py
Author: NO
Purpose: This program asks for your favorite and disliked colors and prints a reply for each
"""
color = input("Please type your favorite color: ")
print("Your favorite color is: {} Woow!!!!! That color is gorgeous".format(color))
dislike = input("What color do you dislike? ")
print(":( .... Oh!!!! No!!! {} is a nice color.".format(dislike))
print("Bye :P") | UTF-8 | Python | false | false | 373 | py | 7 | prove01_colors.py | 6 | 0.670241 | 0.664879 | 0 | 12 | 30.166667 | 82 |
vakulenkoannag/gloss | 5,171,140,666,941 | 5fa5dca40fc9cf4c52bd9ddc1bf95288bbb79726 | f892a1c6b5d82ab64aa284fe0696a812c4c3ede9 | /hw9/hw9.py | edda968432b67c319544b27c8eae797a9ab3a7a7 | []
| no_license | https://github.com/vakulenkoannag/gloss | 8724431949c3c3755955681660aeb5ee6dff16fd | 107937b92c3d0d30da7927fd6745d2be74eefa6c | refs/heads/master | 2018-09-07T15:47:39.755998 | 2018-06-20T09:47:52 | 2018-06-20T09:47:52 | 103,251,849 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
def nameinput():
filename = input('Введите название файла: ')
return filename
def gettext(filename):
with open (filename, 'r', encoding='utf-8') as f:
text = f.read().lower()
for symbol in ['.', ',', ':', ';', '—', '!', '?', '*', '&', '"', '"', '«', '»', '…', '/', '(', ')']:
if symbol in text:
text = text.replace(symbol, ' ')
return text
def searchwords(text):
found = {}
allforms = ['найтис?ь?\s', 'найд[еёияу][штм]?[еь]?с?[ья]?\s',
'найден[оаы]?н?[а-я]?[а-я]?[а-я]?\s', 'наш[её]?л[аои]?с?[ья]?\s',
'нашедш[а-я][а-я]?с?[ья]?\s']
for form in allforms:
foundforms = re.findall(form, text)
for word in foundforms:
word = word.strip()
if word in found:
found[word] += 1
else:
found[word] = 1
return found
def listforms(found):
for key, value in found.items() :
print(key)
def checkfile(filename):
try:
with open(filename): pass
except FileNotFoundError:
return False
def final():
filename = nameinput()
while checkfile(filename) == False:
print('Такого файла тут нет! Попробуйте ввести другое название.')
filename = nameinput()
else:
print('Список встретившихся в тексте форм глагола «найти»: ')
listforms(searchwords(gettext(filename)))
final()
| UTF-8 | Python | false | false | 1,596 | py | 65 | hw9.py | 41 | 0.529703 | 0.527581 | 0 | 49 | 27.857143 | 104 |
benti/Error-Pypagation | 13,134,010,037,519 | 5ca30be7f3c5b54567833b5548148d43066d0f8b | d1011bcd64c5aae27738a877f2ab86b06395689c | /errorpro/core.py | c5bd46bfd295bac3a8eca1cb24cf0461b9bdbf78 | []
| no_license | https://github.com/benti/Error-Pypagation | 1f0c88b10f2b778c463bfb5c0c5a31d4f409d1cf | 108feddc58a705da82fe6fdce658b419b589b533 | refs/heads/master | 2021-01-16T17:51:41.775271 | 2016-04-21T18:38:33 | 2016-04-21T18:38:33 | 41,669,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from sympy import S, Expr, latex, Function, Symbol
from errorpro.units import parse_unit
from errorpro.quantities import Quantity, get_value, get_error, get_dimension
from errorpro.dimensions.dimensions import Dimension
from errorpro import pytex
from IPython.display import Latex as render_latex
def assign(value, error=None, unit=None, name=None, longname=None, value_unit=None, error_unit=None, ignore_dim=False):
""" function to create a new quantity
Args:
value: number or string that can be parsed by numpy, or sympy
expression. If it's a sympy expression containing quantities, it
will perform the calculation, otherwise it just saves the value.
error: number that is saved as the value's uncertainty. this will replace
any error coming from a calculation.
unit: sympy expression of Unit objects. This is used to convert and save
value and error in base units. Replaces value_unit and error_unit if
specified.
name: short name of the quantity (usually one letter). If not specified,
quantity will get a dummy name.
longname: optional additional description of the quantity
value_unit: unit of value. Use this if value and error have different units.
error_unit: unit of error.
ignore_dim: bool. Keeps function from raising an error even if calculated
and given unit don't match. Then given unit is used instead.
"""
value_formula = None
value_factor = 1
value_dim = Dimension()
error_formula = None
error_factor = 1
error_dim = Dimension()
# parse units
if unit is not None:
# if one general unit is given
value_factor, value_dim, value_unit = parse_unit(unit)
error_factor = value_factor
error_dim = value_dim
error_unit = value_unit
else:
# if value unit is given
if value_unit is not None:
value_factor, value_dim, value_unit = parse_unit(value_unit)
# if error unit is given
if error_unit is not None:
error_factor, error_dim, error_unit = parse_unit(error_unit)
# check dimension consistency between value_dim and error_dim
if value_unit is not None and not value_dim == error_dim:
raise RuntimeError("dimension mismatch\n%s != %s" % (value_dim, error_dim))
# process value
# if it's a calculation
if isinstance(value, Expr) and not value.is_number:
value_formula = value
value = get_value(value_formula)
if ignore_dim:
# with ignore_dim=True, calculated value is converted to given unit
value = np.float_(value_factor)*np.float_(value)
else:
# calculate dimension from dependency
calculated_dim = get_dimension(value_formula)
if value_unit is None:
value_dim = calculated_dim
else:
if not calculated_dim == value_dim:
raise RuntimeError("dimension mismatch \n%s != %s" % (value_dim, calculated_dim))
# if it's a number
else:
value=np.float_(value_factor)*np.float_(value)
# process error
if error is not None:
error=np.float_(error_factor)*np.float_(error)
# check value and error shapes and duplicate error in case
if error.shape == () or value.shape[-len(error.shape):] == error.shape:
error = np.resize(error, value.shape)
else:
raise RuntimeError("length of value and error don't match and "\
"can't be adjusted by duplicating.\n"\
"%s and %s" % (value.shape, error.shape))
# if error can be calculated
elif value_formula is not None:
error, error_formula = get_error(value_formula)
if ignore_dim:
# with ignore_dim=True, calculated error is converted to given unit
error = np.float_(error_factor)*np.float_(error)
q = Quantity(name, longname)
q.value = value
q.value_formula = value_formula
q.error = error
q.error_formula = error_formula
if value_unit is not None:
q.prefer_unit = value_unit
else:
q.prefer_unit = error_unit
q.dim = value_dim
return q
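# Illustrative usage sketch (not part of the original module). It only uses
# arguments documented in the docstring above; the literal values are made up:
#
#   length = assign(1.23, error=0.05, name="l", longname="measured length")
#   print(length.value, length.error)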
def formula(quantity):
""" returns error formula of quantity as latex code
Args:
quantity: Quantity object
Return:
latex code string of error formula
"""
assert isinstance(quantity, Quantity)
if quantity.error_formula is None:
raise ValueError("quantity '%s' doesn't have an error formula." % quantity.name)
formula = quantity.error_formula
if isinstance(formula,str):
return formula
else:
# replace "_err" by sigma function
sigma = Function("\sigma")
for var in formula.free_symbols:
if var.name[-4:] == "_err":
formula = formula.subs(var, sigma( Symbol(var.name[:-4], **var._assumptions)))
latex_code = latex(sigma(quantity)) + " = " + latex(formula)
form_button, form_code = pytex.hide_div('Formula', '$%s$' % (latex_code) , hide = False)
latex_button, latex_code = pytex.hide_div('LaTex', latex_code)
res = 'Error Formula for %s<div width=20px/>%s%s<hr/>%s<br>%s' % (
'$%s$' % latex(quantity), form_button, latex_button, form_code, latex_code)
return render_latex(res)
| UTF-8 | Python | false | false | 5,465 | py | 20 | core.py | 17 | 0.627081 | 0.625984 | 0 | 148 | 35.925676 | 119 |
savethebeesandseeds/cuwacunu_dowaave | 111,669,192,281 | 041eac041cc13828e739d0393dd5f2644d1e875a | 451d5e3ed6f65bfe6b6918caa4a07ec5c2734de7 | /cwcn_duuruva_piaabo.py | 6bb1efe9d2ca28e1f87bafa0d8fbc6dc91f67297 | []
| no_license | https://github.com/savethebeesandseeds/cuwacunu_dowaave | b75c40d236be23bedc8b1721d1c9303bfa6f94ff | 836406874e59c97f97253c42fcbaca8c751dce1a | refs/heads/master | 2023-08-30T01:22:41.742420 | 2021-10-25T05:43:23 | 2021-10-25T05:43:23 | 411,772,746 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # --- --- ---
# cwcn_duuruva_piaabo.py
# --- --- ---
# a mayor TEHDUJCO to python fundation
# --- --- ---
# a mayor TEHDUJCO to the torch fundation
# --- --- ---
import math
# --- --- ---
import cwcn_dwve_client_config as dwvc
# --- --- ---
class DUURUVA:
def __init__(self,_duuruva_vector_size : int,_wrapper_duuruva_normalize, _d_name : str=None):
self._d_name=_d_name
self._wrapper_duuruva_std_or_norm=_wrapper_duuruva_normalize
self._duuruva_vector_size=_duuruva_vector_size
self._reset_duuruva_()
def _reset_duuruva_(self):
self._d_count=0
self._duuruva=[]
for _ in range(self._duuruva_vector_size):
aux_d={}
aux_d['value'] = 0
aux_d['diff_1'] = 0
aux_d['diff_2'] = 0
aux_d['max'] = 0
aux_d['min'] = 0
aux_d['variance'] = 0
aux_d['mean'] = 0
aux_d['M2'] = 0
aux_d['M3'] = 0
aux_d['M4'] = 0
aux_d['kurtosis'] = 0
aux_d['skewness'] = 0
self._duuruva.append(aux_d)
def _is_duuruva_ready_(self):
return dwvc.CWCN_DUURUVA_CONFIG.DUURUVA_READY_COUNT<=self._d_count
def _duuruva_inverse_value_wrapper_(self,c_vect):
for _v_idx in range(self._duuruva_vector_size):
if(self._duuruva_vector_size==1):
c_value = c_vect
else:
c_value = c_vect[_v_idx]
try:
if(self._wrapper_duuruva_std_or_norm == 'norm'):
c_standar = (c_value)*(math.sqrt(self._duuruva[_v_idx]['variance']) + dwvc.CWCN_DUURUVA_CONFIG.MIN_STD) + self._duuruva[_v_idx]['mean']
elif(self._wrapper_duuruva_std_or_norm == 'std'):
c_standar = (c_value - self._duuruva[_v_idx]['mean'])*(math.sqrt(self._duuruva[_v_idx]['variance']) + dwvc.CWCN_DUURUVA_CONFIG.MIN_STD) + self._duuruva[_v_idx]['mean']
elif(self._wrapper_duuruva_std_or_norm == 'mean'):
c_standar = (c_value + self._duuruva[_v_idx]['mean'])
elif(self._wrapper_duuruva_std_or_norm == 'not'):
c_standar = c_value
else:
assert(False), "wrong wrapper_duuruva_std_or_norm configuration"
except Exception as e:
if(self._wrapper_duuruva_std_or_norm == 'not'):
c_standar=c_value
else:
c_standar=0
if(self._is_duuruva_ready_()):
raise Exception("Error processing duuruva : {}".format(e))
# --- --- --- --- ---
if(self._is_duuruva_ready_() or self._wrapper_duuruva_std_or_norm == 'not'):
if(self._duuruva_vector_size==1):
c_vect = c_standar
else:
c_vect[_v_idx] = c_standar
else:
if(self._duuruva_vector_size==1):
c_vect = 0
else:
c_vect[_v_idx] = 0
def _duuruva_value_wrapper_(self,c_vect):
self._d_count+=1
_n = min(self._d_count,dwvc.CWCN_DUURUVA_CONFIG.DUURUVA_MAX_COUNT)
for _v_idx in range(self._duuruva_vector_size):
if(self._duuruva_vector_size==1):
c_value = c_vect
else:
c_value = c_vect[_v_idx]
# --- --- --- --- --- --- --- --- --- --- a mayor TEHDUJCO to the WIKI
# --- --- --- --- https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
self._duuruva[_v_idx]['value']=c_value
self._duuruva[_v_idx]['max']=max(self._duuruva[_v_idx]['max'], self._duuruva[_v_idx]['value'])
self._duuruva[_v_idx]['min']=min(self._duuruva[_v_idx]['min'], self._duuruva[_v_idx]['value'])
_delta = self._duuruva[_v_idx]['value'] - self._duuruva[_v_idx]['mean']
_delta_n = _delta/_n
_delta_n2 = _delta_n*_delta_n
_term1 = _delta*_delta_n*(_n-1)
self._duuruva[_v_idx]['mean'] += _delta_n
self._duuruva[_v_idx]['M4'] += _term1*_delta_n2*(_n*_n-3*_n+3)+6*_delta_n2*self._duuruva[_v_idx]['M2']-4*_delta_n*self._duuruva[_v_idx]['M3']
self._duuruva[_v_idx]['M3'] += _term1*_delta_n*(_n-2)-3*_delta_n*self._duuruva[_v_idx]['M2']
self._duuruva[_v_idx]['M2'] += _term1
try:
self._duuruva[_v_idx]['variance'] = self._duuruva[_v_idx]['M2']/(_n-1)
self._duuruva[_v_idx]['kurtosis'] = (_n*self._duuruva[_v_idx]['M4'])/(self._duuruva[_v_idx]['M2']*self._duuruva[_v_idx]['M2'])-3
self._duuruva[_v_idx]['skewness'] = math.sqrt(_n)*self._duuruva[_v_idx]['M3']/(math.pow(self._duuruva[_v_idx]['M2'],3)*math.sqrt(self._duuruva[_v_idx]['M2'])) #FIXME check if is right
if(self._wrapper_duuruva_std_or_norm == 'norm'):
c_standar = (c_value - self._duuruva[_v_idx]['mean'])/(math.sqrt(self._duuruva[_v_idx]['variance']) + dwvc.CWCN_DUURUVA_CONFIG.MIN_STD)
elif(self._wrapper_duuruva_std_or_norm == 'std'):
c_standar = (c_value - self._duuruva[_v_idx]['mean'])/(math.sqrt(self._duuruva[_v_idx]['variance']) + dwvc.CWCN_DUURUVA_CONFIG.MIN_STD) + self._duuruva[_v_idx]['mean']
elif(self._wrapper_duuruva_std_or_norm == 'mean'):
c_standar = (c_value - self._duuruva[_v_idx]['mean'])
elif(self._wrapper_duuruva_std_or_norm == 'not'):
c_standar = c_value
else:
assert(False), "wrong wrapper_duuruva_std_or_norm configuration"
except Exception as e:
if(self._wrapper_duuruva_std_or_norm == 'not'):
c_standar=c_value
else:
c_standar=0
if(self._is_duuruva_ready_()):
raise Exception("Error processing duuruva : {}".format(e))
# --- --- --- --- ---
if(self._is_duuruva_ready_() or self._wrapper_duuruva_std_or_norm == 'not'):
if(self._duuruva_vector_size==1):
c_vect = c_standar
else:
c_vect[_v_idx] = c_standar
else:
if(self._duuruva_vector_size==1):
c_vect = 0
else:
c_vect[_v_idx] = 0
return c_vect
| UTF-8 | Python | false | false | 6,471 | py | 21 | cwcn_duuruva_piaabo.py | 16 | 0.48586 | 0.476433 | 0 | 126 | 50.349206 | 199 |
schwt/bi_graph | 12,068,858,131,806 | 99e99cc9d0569592c10421f05ac48d93b95482b5 | 98638a1b31c6ffe372c381e1ed7b7bf742e8b489 | /src/print_result.py | 7154708aea2f24b7dfed0e4c3ea6c084280890aa | []
| no_license | https://github.com/schwt/bi_graph | 79e9afb54ed1e7ca2d7871469884971caaaa6021 | 56657dac5b1ef765a5cb33a6ef5f1bee93e2418a | refs/heads/master | 2021-06-04T14:41:32.300563 | 2020-03-19T06:12:48 | 2020-03-19T06:12:48 | 103,367,097 | 1 | 2 | null | false | 2019-03-22T08:34:13 | 2017-09-13T07:20:09 | 2018-11-06T09:20:53 | 2019-03-22T08:34:13 | 125 | 1 | 0 | 0 | C++ | false | null | #!/usr/bin/python
#encoding:utf-8
import sys
s_sep = "\t"
count = 0
f_src = sys.argv[1]
f_dst = sys.argv[2]
f_name = sys.argv[3]
idc_id = int(sys.argv[4])
idc_name = int(sys.argv[5])
if len(sys.argv) >= 7:
count = int(sys.argv[6])
d_name = {}
for line in file(f_name):
sep = line.strip().split(s_sep)
if len(sep) < max(idc_id, idc_name):
continue
id = int(sep[idc_id])
d_name[id] = sep[idc_name]
print "# item name:", len(d_name)
wf = open(f_dst, "w")
cnt0 = 0
cnt1 = 1
for line in file(f_src):
cnt0 += 1
    try:
id, recos = line.strip(" ,\t\n\r").split("\t")
sep = recos.split(",")
id = int(id)
wf.write("[%d] \t m=%d \t len=%d \t\t %s\n" % (cnt1, id, len(sep), d_name.get(id, "null")))
cnt2 = 1
for kv in sep:
k,v = kv.split(":")
rid = int(k)
wf.write("\t[%2d]\t %-9d\t %-8s\t %s\n" % (cnt2, rid, v, d_name.get(rid, "null")))
cnt2 += 1
cnt1 += 1
if count > 0 and cnt1 > count:
break
wf.write("\n")
    except:
print "error line: (%s)" % line.strip()
continue
wf.close()
print "# src line:", cnt0
print "# dst line:", cnt1 -1
| UTF-8 | Python | false | false | 1,249 | py | 42 | print_result.py | 35 | 0.486789 | 0.461169 | 0 | 55 | 21.709091 | 99 |
thaahtel/Toinen_kerta | 16,750,372,456,086 | 63b3ea4b3ce346de19dfb0e5bac8722bdd7c33ce | 46650a83f935d76c8556ae5d12c1994e4d8ff84a | /lukumaara1.py | 66d5bf33bf760286da720415527514d49536c2ba | []
| no_license | https://github.com/thaahtel/Toinen_kerta | 88289c7c51f00ffaafbab03399168cce42e09834 | 2ce2bf6648a984fcafade8f539f8847a2eae04ea | refs/heads/master | 2022-11-06T12:42:26.492506 | 2020-06-15T11:00:12 | 2020-06-15T11:00:12 | 272,415,403 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("Syötä kokonaislukuja, 0 lopettaa:")
luku = int(input("Luku: "))
lkm = 0
while luku != 0:
luku = int(input("Luku: "))
lkm = lkm +1
print("Lukuja yhteensä " + str(lkm))
| UTF-8 | Python | false | false | 187 | py | 7 | lukumaara1.py | 6 | 0.603261 | 0.581522 | 0 | 9 | 19.444444 | 42 |
Refragg/VMU-Bad-Apple | 18,992,345,395,434 | 056075c1e011de1ca8f65223a06a7536530a319c | bdccfab594f79677936c14b8dd26a378c96cddfa | /converter.py | 2100dd58371f667ba5b88854e6c8543bc8e069cc | []
| no_license | https://github.com/Refragg/VMU-Bad-Apple | 5a66ad86a61b86f5c85267d0eb89639de5416bf1 | bfd0db411000c906082f2a51e73b8474dbcf7afa | refs/heads/master | 2023-04-02T13:42:47.457301 | 2021-04-13T19:00:41 | 2021-04-13T19:00:41 | 357,643,353 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This script assumes you have a set of frames that are 48x32 in size
and a bit-size of 1 or equivalent so that it can be converted correctly.
"""
from PIL import Image
totalbytes = bytearray()
for i in range(6073): # Iterate through all frames
currentPath = "frames/frame" + str(i) + ".png" # Set Path to current frame (change filepath and extension accordingly)
image = Image.open(currentPath) # Open Image at path
curbyte = 0 # Initialise byte builder
for i in range(31,-1,-1): # Iterate from the bottom right pixel (VMU format)
for j in range(47,-1,-1):
# Build the current byte (6 for each horizontal line)
# Take the value at the current pixel and flip it by abusing python types
# The value is shifted to it's relative position
curbyte += (not image.getpixel((j,i))) << (j % 8)
if 7 - (j % 8) == 7: # If the byte is finished, we add it to the bytearray
totalbytes.append(curbyte)
curbyte = 0
image.close()
# Create a byte file and store everything in order for the dreamcast program to read
finalfile = open('frames.bin', 'wb')
finalfile.write(totalbytes)
finalfile.close()
### String Formatting for one frame ###
"""
for i in range(0, len(curbs), 6):
strbuild = ""
for byte in range(6):
hexbyte = hex(curbs[i + byte])
if len(hexbyte) == 3:
strbuild += "0x0" + hexbyte[-1].upper() + ", "
else:
strbuild += hexbyte.upper() + ", "
print(strbuild)
"""
| UTF-8 | Python | false | false | 1,624 | py | 5 | converter.py | 3 | 0.589286 | 0.570197 | 0 | 49 | 31.142857 | 122 |
tojames/mobilesystem | 11,184,094,839,430 | 79e96d8fe34092149f8f6504d62b2423fba96cd7 | fc633b759fff244f1d753719d147e8553ccc231b | /mobile/apps/entry/urls/tags.py | 53aeba7081299295eb4d3e3aa921278bc30a649c | []
| no_license | https://github.com/tojames/mobilesystem | 99ef591e77e830a875eeeefb471848e5d4f856fa | ab30887bab69f46c93461ab550311fb1ed865d65 | refs/heads/master | 2019-07-05T13:21:44.239144 | 2011-12-18T11:06:03 | 2011-12-18T11:06:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Urls for the entry tags"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
from apps.entry.views.tags import tag_detail
tag_conf = {'template_name': 'entry/tag_list.html'}
urlpatterns = patterns('entry.views.tags',
url(r'^$', 'tag_list', tag_conf, name='entry_tag_list'),
url(r'^(?P<tag>[- \w]+)/$', tag_detail, {}, 'entry_tag_detail'),
url(r'^(?P<tag>[- \w]+)/page/(?P<page>\d+)/$', tag_detail, {}, 'entry_tag_detail_paginated'),
)
| UTF-8 | Python | false | false | 570 | py | 300 | tags.py | 178 | 0.554386 | 0.554386 | 0 | 11 | 50.818182 | 116 |
yshlodha/multidb_router | 10,067,403,360,483 | 020e733c9e884550d43c2b341cebf6129d819ed9 | f01597b46110214c3823e762b0e46455d67930f7 | /multidb_router_app/data_handler.py | 2342ceb8375c703de4c8cc43cd9cb352d16d39ce | []
| no_license | https://github.com/yshlodha/multidb_router | 3f128399391b7a40668117d60d29f7c34d805dc3 | ce7769be4c2b0f10ccec375f72f5fd6dd5fe1e46 | refs/heads/master | 2020-03-10T00:03:13.877376 | 2018-04-11T12:24:58 | 2018-04-11T12:24:58 | 129,072,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .models import *
def get_product_list(user):
"""
    :param user: Django auth user whose linked databases are queried
    :return: list of {database name: products} dicts for every linked database
        that has products, or [] if the user has no DatabaseUser record
"""
product_list = []
try:
dbuser = DatabaseUser.objects.get(user__username=user.username)
dbs = dbuser.databases.all()
for db in dbs:
products = Product.objects.using(db.name).filter(user__id=dbuser.id)
if products:
product_list.append({db.name: products})
except DatabaseUser.DoesNotExist:
return []
return product_list
def get_user_dbs(user):
"""
    :param user: Django auth user
    :return: list of names of the databases linked to the user, or [] if the
        user has no DatabaseUser record
"""
try:
dbuser = DatabaseUser.objects.get(user__username=user.username)
db_list = [db.name for db in dbuser.databases.all()]
except DatabaseUser.DoesNotExist:
return []
return db_list
def get_dbuser(username):
"""
    :param username: username to look up
    :return: the matching DatabaseUser object, or None if it does not exist
"""
try:
dbuser = DatabaseUser.objects.get(user__username=username)
except DatabaseUser.DoesNotExist:
return None
return dbuser
def add_product_to_db(user, name, category, database):
"""
    :param user: Django auth user who will own the product
    :param name: product name
    :param category: product category
    :param database: alias of the database the product is saved to
    :return: True on success, False if the user has no DatabaseUser record
"""
try:
dbuser = DatabaseUser.objects.get(user__username=user.username)
product_object = Product(user_id=dbuser.id, category=category, name=name)
product_object.save(using=database)
except DatabaseUser.DoesNotExist:
return False
return True
def get_all_user_products():
"""
    :return: list of {username: product list} dicts for every non-superuser
        database user that has products
"""
users = []
dbusers = DatabaseUser.objects.filter(user__is_superuser=False)
for dbuser in dbusers:
products = get_product_list(dbuser.user)
if products:
users.append({dbuser.user.username: products})
return users
def delete_product(username, product_id, db):
"""
    :param username: username of the product owner
    :param product_id: id of the product to delete
    :param db: alias of the database that holds the product
    :return: True if the product was deleted, False otherwise
"""
try:
dbuser = DatabaseUser.objects.get(user__username=username)
except DatabaseUser.DoesNotExist:
return False
try:
product = Product.objects.using(db).get(id=product_id, user__id=dbuser.id)
except Product.DoesNotExist:
return False
product.delete()
return True | UTF-8 | Python | false | false | 2,233 | py | 14 | data_handler.py | 7 | 0.61442 | 0.61442 | 0 | 90 | 23.822222 | 82 |
HsOjo/FlaskExamples | 833,223,687,823 | 2df8260b025251870213a1ec3ff49382c6bd7d8a | acf2f17a28922f72b9ea792fedee57eb4a6c2b42 | /FlaskChapter4/forms.py | 0b3bcd020a941cd566268648371741e0e58d09d1 | []
| no_license | https://github.com/HsOjo/FlaskExamples | 1339246e7f07c2863451e5f48783bde772c8399c | 24246d7f18fbcf4db8a1de7ad68b299fd54047c4 | refs/heads/master | 2020-06-22T20:26:52.375162 | 2020-05-15T17:23:34 | 2020-05-15T17:23:34 | 198,390,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, IntegerField, TextAreaField, SelectField, \
SelectMultipleField, RadioField
from wtforms.validators import DataRequired, Email
class LoginForm(FlaskForm):
username = StringField(label='用户名', validators=[DataRequired()])
password = PasswordField(label='密码', validators=[DataRequired()])
submit = SubmitField(label='登录')
class RegisterForm(FlaskForm):
username = StringField(label='用户名', validators=[DataRequired()])
password = PasswordField(label='密码', validators=[DataRequired()])
age = IntegerField(label='年龄', validators=[DataRequired()])
phone = StringField(label='电话', validators=[DataRequired()])
email = StringField(label='邮箱', validators=[DataRequired(), Email()])
introduce = TextAreaField(label='自我介绍')
education = SelectField(label='学历', coerce=int, choices=list(
{
0: '保密',
1: '中专',
2: '高中',
3: '专科',
4: '本科',
}.items()
))
sex = RadioField(label='性别', validators=[DataRequired()], coerce=int, choices=list(
{
1: '男',
2: '女',
}.items()
))
skill = SelectMultipleField(label='特长', validators=[DataRequired()], coerce=int, choices=list(
{
1: '软件开发',
2: '系统运维',
3: '网络安全',
}.items()
))
submit = SubmitField(label='注册')
| UTF-8 | Python | false | false | 1,566 | py | 100 | forms.py | 55 | 0.60631 | 0.599451 | 0 | 42 | 33.714286 | 104 |
Sagar2366/SPOJ-Python-Solutions | 8,529,805,059,533 | c7a065969920f2cf830a13da4deba2ba5a745248 | 8ae6d9b79ed1d18ce9677099225b17d99c64171d | /CANDY.py | f8e4953cb593af7f10f4b74f71741947e454b223 | []
| no_license | https://github.com/Sagar2366/SPOJ-Python-Solutions | d0645ce08378da6220a1e03577c246a5aa7db841 | 3acc1431761d53b96dff9c49a2d9c785c377abe4 | refs/heads/master | 2020-05-09T13:42:47.887839 | 2016-05-30T11:54:23 | 2016-05-30T11:54:23 | 181,164,916 | 1 | 0 | null | true | 2019-04-13T12:03:50 | 2019-04-13T12:03:49 | 2017-07-21T01:36:37 | 2016-05-30T11:55:06 | 31 | 0 | 0 | 0 | null | false | false | while True:
noOfCase=int(raw_input())
if noOfCase<0:
break
else:
series=[]
for i in range(noOfCase):
series.append(int(raw_input()))
sum1=0
for i in series:
sum1+=i
if sum1%noOfCase==0:
avg=sum1/noOfCase
moves=0
for i in series:
if (avg-i)<0:
moves+=i-avg
print(moves)
else:
print(-1) | UTF-8 | Python | false | false | 472 | py | 73 | CANDY.py | 71 | 0.427966 | 0.40678 | 0 | 20 | 22.65 | 43 |
somphorsngoun/graphic-1 | 670,014,922,131 | 37cd2d7a09ef0fa9f66b309a4aec4a017de7c251 | 21c5a502b9cba927d47e6989be10d9fe1e04b87d | /Homework/Import/Events-/event_1.py | 40ee7b962f574456b68ad00afa4e91c50a13ad3f | []
| no_license | https://github.com/somphorsngoun/graphic-1 | 19e011c5ad9d45a574dc8f50e10d8a70510c274b | a3b50f6b948cfdf67b040c1eb85109b3f58bce86 | refs/heads/main | 2023-02-19T00:01:15.462078 | 2021-01-19T08:28:57 | 2021-01-19T08:28:57 | 330,911,479 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
import random
def doOnClick_1(event):
print("class B")
def doOnClick_2(event):
print("class C")
# Create an empty window
root = tk.Tk()
root.geometry("600x400")
canvas = tk.Canvas(root)
oval = canvas.create_oval(50, 50, 300, 300, fill="#F39118", tags="myTag")
canvas.tag_bind("myTag", "<Button-1>", doOnClick_1)
canvas.tag_bind("myTag", "<Button-3>", doOnClick_2)# Bind the click
canvas.pack(expand=True, fill='both')
root.mainloop() | UTF-8 | Python | false | false | 492 | py | 28 | event_1.py | 28 | 0.654472 | 0.599593 | 0 | 23 | 19.478261 | 73 |
malminhas/trello-utils | 19,396,072,329,998 | 32174ed3bd92fee25621df31ab39a11889d76f3a | d95ca79b4341bc404e31d32760c7e91d39969131 | /trelloDataProcessor.py | 6752f32ede2a85b2a7f99d2f7e836bbe1a0111df | [
"Apache-2.0"
]
| permissive | https://github.com/malminhas/trello-utils | 08d2ffbfe960be2284a08ac1270d4305604c736b | 49a011de1563a87a87e32523dc6d1c73f1fa2a66 | refs/heads/master | 2020-03-28T19:54:25.405050 | 2019-12-02T17:01:35 | 2019-12-02T17:01:35 | 149,020,339 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#
# trelloDataProcessor.py
# ----------------------
#
# Mal Minhas <mal@kano.me>
# Copyright (c) 2018 Kano Computing. All Rights Reserved.
# Licence: GPLv3
#
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import cm
import seaborn as sns
# use Seaborn styles
sns.set()
import os
import arrow
import datetime
formatDateTime = lambda s: arrow.get(s).format('YYYY-MM-DD HH:mm:ss')
class TrelloDataProcessor(object):
def __init__(self,force,verbose=False):
self.verbose = verbose
self.force = force
self.start = None
self.cards = pd.DataFrame()
self.counts = pd.DataFrame()
if os.path.exists('cards.csv'):
self.cards = pd.read_csv('cards.csv')
if os.path.exists('counts.csv'):
self.counts = pd.read_csv('counts.csv')
def getCards(self):
return self.cards
def getCounts(self):
return self.counts
def getStart(self):
start = None
if os.path.exists('.start'):
with open('.start','r') as f:
start = f.read()
return start
def setStart(self,start):
with open('.start','w') as f:
f.write(start)
def createCardDistributionBarChart(self, cards, desc, colors=None, reverse=False, output=None):
df = pd.DataFrame(cards)
if self.force or not os.path.exists('cards.csv'):
df.to_csv('cards.csv')
print("{} rows, {} columns".format(df.shape[0],df.shape[1]))
gps = df.groupby(['list'])
longest = 0
for i,gp in enumerate(gps):
self.verbose and print("{:02d} cards in '{}'".format(len(gp[1]),gp[0]))
longest = max(longest,len(gp[1]))
nrows = i+1
longest += int(longest/20)
self.verbose and print("longest value={}".format(longest))
today = arrow.utcnow().format("YYYY-MM-DD")
if reverse:
buckets = df.groupby(['list']).size()[::-1]
colors = colors[::-1]
else:
buckets = df.groupby(['list']).size()
        if colors and nrows != len(colors):
print("Mismatch between number of colors {} and number of rows {} in graph!".format(len(colors),nrows))
cmap = cm.get_cmap('jet')
if longest > 50:
if colors:
ax = buckets.plot(kind='barh',color=colors,title='Current {} distribution {}'.format(desc,today),figsize=(18,9))
else:
ax = buckets.plot(kind='barh',cmap=cmap,title='Current {} distribution {}'.format(desc,today),figsize=(18,9))
else:
if colors:
ax = buckets.plot(kind='barh',xticks=list(range(0,longest)),color=colors,title='Current {} distribution {}'.format(desc,today),figsize=(18,9))
else:
ax = buckets.plot(kind='barh',xticks=list(range(0,longest)),cmap=cmap,title='Current {} distribution {}'.format(desc,today),figsize=(18,9))
ax.set_xticklabels(list(range(0,longest)))
ax.set_ylabel('Trello List')
ax.set_xlabel('count')
if output:
name = output
else:
name = '{}Snapshot_{}.png'.format(desc, today)
plt.savefig(name)
return name
def createCardTimeSeriesStackedBarChart(self, counts, desc, selected, start, end=None, colors=None, output=None):
df = pd.DataFrame(counts)
if self.force or not os.path.exists('counts.csv'):
df.to_csv('counts.csv')
df.date = pd.to_datetime(df.date)
datetimeArr = list(map(formatDateTime,df['date'].tolist()))
# Set index of df to 'date' column and then delete
df.index = df['date']
df = df.drop(columns=['date'])
print("{} rows, {} columns".format(df.shape[0],df.shape[1]))
print("Start date = {}".format(start))
print("Colors: {}".format(colors))
assert(len(counts) == df.shape[0])
today = arrow.utcnow().format("YYYY-MM-DD")
if not len(selected):
# We will select ALL lists
selected = df.columns.values.tolist()
#if not len(colors):
# # We want to create a random distribution per discussion here:
# # https://matplotlib.org/users/dflt_style_changes.html#colors-color-cycles-and-color-maps
# #colors = 'bgrcmyk' # classic
# colors = ['#1f77b4','#ff7f0e','#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf'] # v2.0 default
if not colors:
            colors = ['cool'] # default colormap; also covers colors=None
if colors[0] in ['summer','autumn','winter','spring','cool']:
cmap = cm.get_cmap(colors[0])
colors = None
if df.shape[0] > 50:
print('Greater than 50 date values!')
# More than 50 dates to plot => need to switch to default xaxis handling
if colors:
ax = df[selected].plot(kind='bar',stacked=True,color=colors,xticks=df.index,
title='{} Board time series {}'.format(desc,today),figsize=(24,12))
else:
ax = df[selected].plot(kind='bar',stacked=True,cmap=cmap,xticks=df.index,
title='{} Board time series {}'.format(desc,today),figsize=(24,12))
#ax.xaxis_date()
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(df.index)
# Every 4th ticklable shows the month and day
#ticklabels[::4] = [item.strftime('%b %d') for item in df.index[::4]]
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%d-%m-%Y') for item in df.index[::12]]
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
plt.xticks(rotation=90)
#plt.gcf().autofmt_xdate()
else:
if colors:
ax = df[selected].plot(kind='bar',stacked=True,color=colors,xticks=df.index,
title='{} time series {}'.format(desc,today),figsize=(18,9))
else:
ax = df[selected].plot(kind='bar',stacked=True,cmap=cmap,xticks=df.index,
title='{} time series {}'.format(desc,today),figsize=(18,9))
# Not using this any more - going with ticklabels approach
#ax.set_xticklabels(datetimeArr)
#
#ax.xaxis_date()
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(df.index)
# Every 4th ticklable shows the month and day
#ticklabels[::4] = [item.strftime('%b %d') for item in df.index[::4]]
# Every 12th ticklabel includes the year
ticklabels = [item.strftime('%d-%m-%Y') for item in df.index]
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
plt.xticks(rotation=90)
ax.set_ylabel('count')
# TBD: handling of annotations
#ax.annotate(selected[0], xy=(start,0), color=colors[0], xytext=(start,27),fontsize=12)
#ax.annotate(selected[1], xy=(start,0), color=colors[1],xytext=(start,7),fontsize=12)
#ax.annotate(selected[2], xy=(start,0), color=colors[2],xytext=(start,10),fontsize=12)
#ax.annotate(selected[3], xy=(start,0), color=colors[3],xytext=(start,3),fontsize=12)
#ax.annotate(selected[3], xy=(start,0), color=colors[3],xytext=(start,3),fontsize=12)
#
#featurecomplete = "2018-08-31 21:00:00"
#ax.plot(kind='line')
#ax.axvline(x=featurecomplete, ymin=0, ymax=40, linestyle=':',color='k')
#
if output:
name = output
else:
name = '{}TimeSeries_{}.png'.format(desc,today)
plt.subplots_adjust(top=0.8) # Provides margin at bottom to accommodate legend
plt.subplots_adjust(bottom=0.2) # Provides margin at bottom to accommodate axis
plt.savefig(name)
return name
def getCardCounts(self,df,dt):
qfilter = "category=='updateCard' or category=='createCard' or category=='deleteCard' or category=='moveCardToBoard'"
if not dt:
dt = formatDateTime(datetime.datetime.now())
#today = arrow.utcnow().format("YYYY-MM-DD")
candidates = df[df.date <= dt].query(qfilter)
deduped = candidates.drop_duplicates(subset='card',keep='first').drop_duplicates(subset='date',keep='first')
countsDico = deduped[deduped.closed==False].groupby(['after']).size().to_dict()
return countsDico
def generateDateRange(self,start,end=None):
if not end:
end = formatDateTime(datetime.datetime.now())
print(start,end)
count = 0
drange = []
# convert start to a datetime
dt = arrow.get(start).datetime
et = arrow.get(end).datetime
drange.append(start)
while dt < et:
dt = arrow.get(dt).datetime + datetime.timedelta(days=1)
day = formatDateTime(dt)
drange.append(formatDateTime(day))
count += 1
return drange
def getActionCountsOverTime(self,actions,start,end=None):
counts = []
df = pd.DataFrame(actions)
print("{} rows, {} columns".format(df.shape[0],df.shape[1]))
df.date = pd.to_datetime(df.date)
df.card = df.card.fillna(-1).astype(int)
#df.list = ndf.list.astype('category')
df.category = df.category.astype('category')
df.board = df.board.astype('category')
df.after = df.after.astype('category')
df.actor = df.actor.astype('category')
dts = self.generateDateRange(start,end)
for dt in dts:
dico = self.getCardCounts(df,dt)
dico['date'] = dt
counts.append(dico)
assert(len(counts) == len(dts))
return counts
| UTF-8 | Python | false | false | 9,939 | py | 10 | trelloDataProcessor.py | 8 | 0.576718 | 0.559312 | 0 | 226 | 42.977876 | 158 |
broadinstitute/RVBLR | 11,768,210,427,970 | e4af970023334ec78b72a282f8420be0c2e9b0a1 | daf017f502eb34fc697c366922925c4327ca1869 | /util/RVBlr_examine_each_var_separately.py | de855098a21366981dcee623b50372a19b2e1051 | [
"MIT"
]
| permissive | https://github.com/broadinstitute/RVBLR | 7453870127d4b04c674faa08f2374b1381e6d8b7 | 462848ab21c9352d2c18dbdb9005695e216b217f | refs/heads/master | 2023-07-09T21:10:18.012773 | 2021-08-27T15:32:26 | 2021-08-27T15:32:26 | 346,779,782 | 0 | 1 | MIT | false | 2021-03-11T17:23:58 | 2021-03-11T17:15:27 | 2021-03-11T17:15:50 | 2021-03-11T17:23:58 | 0 | 0 | 1 | 0 | null | false | false | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit(1)
import datetime
import subprocess
import argparse
import logging
FORMAT = "%(asctime)-15s: %(levelname)s %(module)s.%(name)s.%(funcName)s %(message)s"
logger = logging.getLogger('ctat_mutations')
logging.basicConfig(stream=sys.stderr, format=FORMAT, level=logging.INFO)
sys.path.insert(0, os.path.sep.join([os.path.dirname(os.path.realpath(__file__)), "../../../PyLib"]))
from Pipeliner import Pipeliner, Command, run_cmd, ParallelCommandList
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
def main():
parser = argparse.ArgumentParser(description="wrapper for running rvboost", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input_vcf", type=str, required=True, help="input vcf file")
parser.add_argument("--output_dir", type=str, required=True, help="output directory name")
parser.add_argument("--attributes", type=str, required=False, help="vcf info attributes to use for scoring purposes",
default="DJ,PctExtPos,ReadPosRankSum,QD,FS,ED")
parser.add_argument("--score_threshold", type=float, required=False, default=0.05, help="score threshold for filtering rvboost results")
parser.add_argument("--num_threads", type=int, required=False, default=4, help="number of concurrent processes")
args = parser.parse_args()
## prep for run
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
checkpts_dir = os.path.join(output_dir, "__chckpts")
pipeliner = Pipeliner(checkpts_dir)
## build pipeline
atts_list = args.attributes.split(",")
cmd_list = []
for att in atts_list:
cmd = " ".join([ os.path.join(SCRIPTDIR, "RVBoostLikeR_wrapper.py"),
"--input_vcf", args.input_vcf,
"--attributes", att,
"--score_threshold {} ".format(args.score_threshold),
"--work_dir {}/rvb-att-{}".format(args.output_dir, att),
"--output_filename {}/rvb-att-{}.thresh{:.3f}.vcf".format(args.output_dir, att, args.score_threshold) ])
cmd_list.append(cmd)
pipeliner.add_commands([ParallelCommandList(cmd_list, "all_atts-rvb.ok", args.num_threads)])
pipeliner.run()
sys.exit(0)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,538 | py | 7 | RVBlr_examine_each_var_separately.py | 6 | 0.631994 | 0.626478 | 0 | 79 | 31.063291 | 140 |
Jin-SukKim/Algorithm | 455,266,545,077 | fcddb626b46868e15d8d0cece7a60864343188d5 | 11130633fe59b222da0696dc05e72ac30871a573 | /Problem_Solving/leetcode/Algorithm/Binary_Search/167_Two_Sum_II_Input_Array_is_Sorted/twoSumII_v2.py | bb4e1ccdf4274e82b90d536e5cafb4e3287d5144 | []
| no_license | https://github.com/Jin-SukKim/Algorithm | 024aa77c6bf63666910a1eb03407e808a05307ec | 5f2a14fe1f64032126df55b1eadc1580a32735f3 | refs/heads/master | 2023-09-01T20:50:13.150780 | 2023-09-01T07:54:32 | 2023-09-01T07:54:32 | 263,555,962 | 4 | 0 | null | false | 2023-02-14T14:36:38 | 2020-05-13T07:24:51 | 2023-02-12T08:15:48 | 2023-02-14T14:36:37 | 3,011 | 1 | 0 | 0 | C++ | false | false |
from typing import List
import bisect
# Solving this with binary search repeats a log n search n times, so the time complexity is O(n log n).
# That makes binary search slower than the O(n) two-pointer approach (see the sketch at the end of this file).
def twoSum(numbers: List[int], target: int) -> List[int]:
for k, v in enumerate(numbers):
left, right = k + 1, len(numbers) - 1
expected = target - v
        # binary search for the complement (target - current value)
while left <= right:
mid = left + (right - left) // 2
if numbers[mid] < expected:
left = mid + 1
elif numbers[mid] > expected:
right = mid - 1
else:
return k + 1, mid + 1
# Solution using Python's built-in binary search (bisect)
def twoSum(self, numbers: List[int], target: int) -> List[int]:
for k, v in enumerate(numbers):
expected = target - v
        # lo bounds the search range on the left, hi bounds it on the right:
# bisect.bisect_left(a, x, lo=0, hi=len(a))
i = bisect.bisect_left(numbers, expected, k + 1)
if i < len(numbers) and numbers[i] == expected:
return k + 1, i + 1
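# Illustrative sketch (not part of the original solutions): the O(n) two-pointer
# approach mentioned in the comment at the top of this file, for comparison.
def twoSumTwoPointer(numbers, target):
    left, right = 0, len(numbers) - 1
    while left < right:
        current = numbers[left] + numbers[right]
        if current < target:
            left += 1    # need a larger sum
        elif current > target:
            right -= 1   # need a smaller sum
        else:
            return left + 1, right + 1  # the problem uses 1-based indices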
| UTF-8 | Python | false | false | 1,177 | py | 400 | twoSumII_v2.py | 347 | 0.523906 | 0.512716 | 0 | 31 | 30.677419 | 63 |
justinsimonelli/Corkboard | 10,419,590,664,004 | 4ccb7e56da1d88f9d12b781dd09f60262c772806 | 02be15f4120c4c29d6d354abe732e3d4cf0fb6a8 | /routes.py | 1a2e281ee09ebad33694aeb0f0cd174db447c6a8 | []
| no_license | https://github.com/justinsimonelli/Corkboard | a2200f5f7a2330ab906300efcafe0bd9572f8069 | c60000e624f1aea098e53be998b167a28fa3706f | refs/heads/master | 2016-09-05T16:25:52.762740 | 2013-12-30T01:53:32 | 2013-12-30T01:53:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!flask/bin/python
from flask import jsonify,make_response,request, render_template, abort
import forecastio, dbUtil
from app import app, models
from config import FORECASTIO_KEY
@app.route('/', methods = ['GET'])
def render_home():
items = models.Todos.query.order_by(models.Todos.timestamp.desc()).all()
return render_template("home.html", todos = items)
@app.route('/corkboard/api/v1/todos', methods = ['GET'])
def get_tasks():
return jsonify( { 'tasks': tasks } )
@app.route('/corkboard/api/v1/todos/add/', methods = ['POST'])
def create_task():
if ((request.json is None)):
abort(400)
data = [dict(message=request.json['message'], latitude='105', longitude='108')]
statusMsg = dbUtil.insert_new_msg(data)
if( statusMsg['status'] == "OK" ):
result = dbUtil.get_latest_record()
record = result['item'][0]
recordDict = {
'id': record.id,
'timestamp': record.timestamp,
'message': record.message,
'latitude': record.latitude,
'longitude': record.longitude
}
return make_response(jsonify( { 'status': statusMsg['status'], 'record': recordDict }), 201)
else:
return make_response(jsonify( { 'status': statusMsg['status'] } ), 400)
@app.route('/corkboard/api/v1/weather/<lat>/<lng>/', methods = ['GET'])
def get_weather(lat=None, lng=None):
forecast = forecastio.load_forecast(FORECASTIO_KEY, lat, lng)
currently = forecast.currently()
dayInfo = {}
for day in forecast.daily().data:
dayInfo.update( generate_weather_dic( day ) )
return make_response(jsonify(dict(current=generate_weather_dic(currently), daily=dayInfo, status='OK')), 201)
@app.route('/corkboard/api/v1/hello/')
@app.route('/corkboard/api/v1/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { 'error': 'Not Found' } ), 404)
@app.errorhandler(500)
def server_error(error):
return make_response(jsonify( {'error': 'Server error'} ), 500)
def generate_weather_dic( object ):
dayInfo = {}
dateSt = None
if not (object is None):
try:
dateSt = object.time.strftime("%m-%d-%Y")
        except Exception as e:
dateSt = str(object.utime)
dayInfo["data"] = {
#order json properties alphabetically just to make things easier
'date' : dateSt,
'high' : object.temperatureMax,
'icon' : object.icon,
'low' : object.temperatureMin,
'precipAccum' : str(object.precipAccumulation),
'precipProbability' : formatFloatingPoint(object.precipProbability , 100),
'precipType' : object.precipType,
'summary' : object.summary,
'windSpeed' : object.windspeed
}
return dayInfo
def formatFloatingPoint( val, multiplier = None ):
if not ( val is None ):
if not ( multiplier is None ):
return ( "%0.2f" % (val * multiplier) )
else:
return ( "%0.2f" % (val) )
if __name__ == '__main__':
app.run(host = '0.0.0.0') | UTF-8 | Python | false | false | 3,244 | py | 12 | routes.py | 9 | 0.60111 | 0.586621 | 0 | 97 | 32.453608 | 113 |
FourColorsSuffice/FSND | 7,026,566,529,563 | 726f85ee49432132743fad050fc209cd94144b84 | 53a3c152a91f2776f63b162281816d14334927e3 | /projects/01_fyyur/starter_code/env/lib/python3.6/shutil.py | 98633f8f8c005e897b6c57ea3372404d4f4cc112 | []
| no_license | https://github.com/FourColorsSuffice/FSND | 789bd8551756fc20fbae97d31230d31a73bf2e90 | dd55b78d380836faa67d29855a64299cc60aa846 | refs/heads/master | 2020-12-02T13:11:02.484351 | 2020-01-02T06:47:37 | 2020-01-02T06:47:37 | 231,016,717 | 0 | 0 | null | true | 2019-12-31T03:09:26 | 2019-12-31T03:09:25 | 2019-12-30T19:18:06 | 2019-12-18T04:41:04 | 2,656 | 0 | 0 | 0 | null | false | false | /Users/pnu/anaconda3/lib/python3.6/shutil.py | UTF-8 | Python | true | false | 44 | py | 43 | shutil.py | 41 | 0.818182 | 0.75 | 0 | 1 | 44 | 44 |
cryptk/pylight | 17,721,035,083,803 | b2a7a01e1423fdb780d7fd7678a01029d10c6914 | 284ed75e0ace87d53394c2f5a62629b66819e092 | /pyqlight/command_line.py | 57a911129d53f54ed646f5835aa17ff086cada28 | [
"MIT"
]
| permissive | https://github.com/cryptk/pylight | e2cd738ac686497e9abaf09908d36207f99742db | beb78662f8ff5ae0c9670ad6227de6cc87b381ed | refs/heads/master | 2020-05-17T16:47:56.861992 | 2015-07-28T16:14:10 | 2015-07-28T16:14:10 | 39,591,012 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from pyqlight import QLight
from collections import OrderedDict
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
def main():
parser = ArgumentParser(description="control a Q-Light warning tower. "
"All lamps should have red, yellow and green "
"lights available. Blue and white lights may or "
"may not be available depending on exact model.",
epilog="Written by Chris Jowett, cryptk@gmail.com",
formatter_class=ArgumentDefaultsHelpFormatter)
lights = parser.add_argument_group('Light Controls', 'Valid states are '
'"off", "on", "blink", "pass"')
light_choices = ['off','on','blink','pass']
lights.add_argument("-r", "--red", help="Desired state of red lamp.",
type=str, nargs='?', default='pass', metavar='STATE',
choices=light_choices)
lights.add_argument("-y", "--yellow", help="Desired state of yellow lamp.",
type=str, nargs='?', default='pass', metavar='STATE',
choices=light_choices)
lights.add_argument("-g", "--green", help="Desired state of green lamp.",
type=str, nargs='?', default='pass', metavar='STATE',
choices=light_choices)
lights.add_argument("-b", "--blue", help="Desired state of blue lamp.",
type=str, nargs='?', default='pass', metavar='STATE',
choices=light_choices)
lights.add_argument("-w", "--white", help="Desired state of white lamp.",
type=str, nargs='?', default='pass', metavar='STATE',
choices=light_choices)
lights.add_argument("-a", "--all-lights", help="State of all lamps.",
type=str, nargs='?', metavar='STATE',
choices=light_choices)
tone = parser.add_argument_group('Tone Controls', 'valid tone options are '
'"off", "tone_1", "tone_2", "tone_3", '
'"tone_4", "tone_5", "pass"')
tone.add_argument("-t", "--tone", help="Desired tone to play.",
type=str, nargs='?', metavar='TONE', default='pass',
choices=['off',
'tone_1',
'tone_2',
'tone_3',
'tone_4',
'tone_5',
'pass'])
tone.add_argument("-d", "--duration",
help="Duration to play tone (in ms).",
type=int, nargs='?', default=0)
args = parser.parse_args()
ql = QLight()
if args.all_lights is not None:
ql.set_all_lights(args.all_lights)
else:
ql.lights = OrderedDict([
('red', args.red),
('yellow', args.yellow),
('green', args.green),
('blue', args.blue),
('white', args.white)
])
ql.update_lamp()
if args.tone:
ql.set_sound(args.tone, args.duration)
| UTF-8 | Python | false | false | 3,294 | py | 4 | command_line.py | 3 | 0.480267 | 0.476928 | 0 | 63 | 51.285714 | 79 |
KC2004/sqlAlchemy-hw | 4,939,212,443,340 | 40b0939c6a8ed35e0c9af739cbb5077de4540f4c | 370ef4712d31285cab584196e1603ccf0f2ae2af | /query.py | 5654be362c69feb6df75dff300d16a4e01550d5d | []
| no_license | https://github.com/KC2004/sqlAlchemy-hw | 8663b46558c8c09f4150aa4386131d967e617384 | 1b2c65e0847c7283610221b6eb8da72d6e96b458 | refs/heads/master | 2021-01-22T02:21:47.181976 | 2017-02-07T22:29:54 | 2017-02-07T22:29:54 | 81,048,237 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This file is the place to write solutions for the
skills assignment called skills-sqlalchemy. Remember to
consult the exercise instructions for more complete
explanations of the assignment.
All classes from model.py are being imported for you
here, so feel free to refer to classes without the
[model.]User prefix.
"""
from model import *
init_app()
# -------------------------------------------------------------------
# Part 2: Discussion Questions
# 1. What is the datatype of the returned value of
# ``Brand.query.filter_by(name='Ford')``?
#
# flask_sqlalchemy.BaseQuery object
# 2. In your own words, what is an association table, and what type of
# relationship (many to one, many to many, one to one, etc.) does an
# association table manage?
#
# An association table manages a many-to-many relationship between two
# tables. It links those tables through foreign keys, and it has a
# one-to-many relationship to each of the tables it joins
# (see the sketch below).
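#
# A minimal illustrative sketch (not from the assignment's model.py; the table
# and column names below are hypothetical) of how such an association table
# could be declared with Flask-SQLAlchemy:
#
#   model_awards = db.Table(
#       'model_awards',
#       db.Column('model_id', db.Integer, db.ForeignKey('models.id')),
#       db.Column('award_id', db.Integer, db.ForeignKey('awards.id')),
#   )
#
# Each row holds one foreign key to each side, so a single many-to-many
# relationship is stored as two one-to-many relationships.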
# -------------------------------------------------------------------
# Part 3: SQLAlchemy Queries
# Get the brand with the ``id`` of "ram."
q1 = "Brand.query.filter_by(brand_id='ram').all()"
# Get all models with the name "Corvette" and the brand_id "che."
q2 = "Model.query.filter(Model.name=='Corvette', Model.brand_id=='che').all()"
# Get all models that are older than 1960.
q3 = "Model.query.filter(Model.year < 1960).all()"
# Get all brands that were founded after 1920.
q4 = "Brand.query.filter(Brand.founded > 1920).all()"
# Get all models with names that begin with "Cor."
q5 = "Model.query.filter(Model.name.like('Cor%')).all()"
# Get all brands that were founded in 1903 and that are not yet discontinued.
q6 = "Brand.query.filter(Brand.founded=='1903', Brand.discontinued==None).all()"
# Get all brands that are either 1) discontinued (at any time) or 2) founded
# before 1950.
q7 = "Brand.query.filter((Brand.founded < '1950') | (Brand.discontinued!=None)).all()"
# Get any model whose brand_id is not "for."
q8 = "Model.query.filter(Model.brand_id!='for').all()"
# -------------------------------------------------------------------
# Part 4: Write Functions
def get_model_info(year):
"""Takes in a year and prints out each model name, brand name, and brand
headquarters for that year using only ONE database query."""
car_list = db.session.query(Model.name, Brand.brand_id, Brand.name, Brand.headquarters).filter(Model.year==year).\
join(Brand).all()
for car in car_list:
print "%s \t %s \t %s \t %s \n" % (car[0], car[1], car[2], car[3])
def get_brands_summary():
"""Prints out each brand name and each model name with year for that brand
using only ONE database query."""
    # one query: every (model name, model year, brand name) row
    car_list = db.session.query(Model.name, Model.year, Brand.name).join(Brand).all()
    for model_name, model_year, brand_name in car_list:
        print "%s \t %s (%s)" % (brand_name, model_name, model_year)
def search_brands_by_name(mystr):
"""Returns all Brand objects corresponding to brands whose names include
the given string."""
    brand_objs = Brand.query.filter(Brand.name.like('%' + mystr + '%')).all()
return brand_objs
def get_models_between(start_year, end_year):
"""Returns all Model objects corresponding to models made between
start_year (inclusive) and end_year (exclusive)."""
    cars_in_range = Model.query.filter(Model.year >= start_year, Model.year < end_year).all()
return cars_in_range
| UTF-8 | Python | false | false | 3,312 | py | 1 | query.py | 1 | 0.656099 | 0.6407 | 0 | 112 | 28.553571 | 118 |
wsgan001/data-mining-course | 4,045,859,194,332 | e726cbecbdf6be3a942e8808f0c684bc42791489 | 97b75306f4c7e2d366cbe58aa7c979c3c1fb2ff1 | /assignment6/santander.py | ad272288b1a1d35cf051878b8eed679e9973cebf | []
| no_license | https://github.com/wsgan001/data-mining-course | 0bd65ce954be28a1170dd3bd188ad0f2a01b66c9 | 4bd869b99a29933ce7304bf08f5f94aa87e95e9d | refs/heads/master | 2020-03-27T00:47:40.383517 | 2016-12-22T07:53:11 | 2016-12-22T07:53:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 16-12-1 1:10 PM
# @Author : 骆克云
# @File : santander.py
# @Software: PyCharm
import pandas as pd
import numpy as np
import xgboost as xgb
import csv
from sklearn import preprocessing
data_dir = "./data/"
# Discrete (categorical) feature mappings
mapping_dict = {
    # Employment status
'ind_empleado': {-99: 0, 'N': 1, 'B': 2, 'F': 3, 'A': 4, 'S': 5},
    # Gender
'sexo': {'V': 0, 'H': 1, -99: 2},
    # Whether the customer is new
'ind_nuevo': {'0': 0, '1': 1, -99: 2},
    # Whether the customer is a primary customer
# 'indrel': {'1': 0, '99': 1, -99: 2},
    # Customer type at the beginning of the month
# 'indrel_1mes': {-99: 0, '1.0': 1, '1': 1, '2.0': 2, '2': 2, '3.0': 3, '3': 3, '4.0': 4, '4': 4, 'P': 5},
    # Customer relation type at the beginning of the month
# 'tiprel_1mes': {-99: 0, 'I': 1, 'A': 2, 'P': 3, 'R': 4, 'N': 5},
    # Whether the residence country is the same as the bank's
# 'indresi': {-99: 0, 'S': 1, 'N': 2},
    # Whether the birth country is the same as the bank's country
# 'indext': {-99: 0, 'S': 1, 'N': 2},
    # Whether the customer is the spouse of an employee
# 'conyuemp': {-99: 0, 'S': 1, 'N': 2},
    # Whether the customer is deceased
# 'indfall': {-99: 0, 'S': 1, 'N': 2},
    # Address type: always 1
# 'tipodom': {-99: 0, '1': 1},
    # Province
'nomprov': {'GIRONA': 0, 'ZAMORA': 1, 'BARCELONA': 2, 'SALAMANCA': 3, 'BURGOS': 4, 'HUESCA': 5, 'NAVARRA': 6,
'AVILA': 7, 'SEGOVIA': 8, 'LUGO': 9, 'LERIDA': 10, 'MADRID': 11, 'ALICANTE': 12, 'SORIA': 13,
'SEVILLA': 14, 'CANTABRIA': 15, 'BALEARS, ILLES': 16, 'VALLADOLID': 17, 'PONTEVEDRA': 18,
'VALENCIA': 19, 'TERUEL': 20, 'CORUÑA, A': 21, 'OURENSE': 22, 'JAEN': 23, 'CUENCA': 24, 'BIZKAIA': 25,
'CASTELLON': 26, 'RIOJA, LA': 27, 'ALBACETE': 28, 'BADAJOZ': 29, 'MURCIA': 30, 'CADIZ': 31, -99: 32,
'ALMERIA': 33, 'GUADALAJARA': 34, 'PALENCIA': 35, 'PALMAS, LAS': 36, 'CORDOBA': 37, 'HUELVA': 38,
'GRANADA': 39, 'ASTURIAS': 40, 'SANTA CRUZ DE TENERIFE': 41, 'MELILLA': 42, 'TARRAGONA': 43,
'ALAVA': 44, 'CEUTA': 45, 'MALAGA': 46, 'CIUDAD REAL': 47, 'ZARAGOZA': 48, 'TOLEDO': 49, 'LEON': 50,
'GIPUZKOA': 51, 'CACERES': 52},
    # Whether the customer is active
'ind_actividad_cliente': {'0': 0, '1': 1, -99: 2},
    # Segmentation type
'segmento': {'02 - PARTICULARES': 0, '03 - UNIVERSITARIO': 1, '01 - TOP': 2, -99: 2},
    # Country of residence
'pais_residencia': {'LV': 102, 'BE': 12, 'BG': 50, 'BA': 61, 'BM': 117, 'BO': 62, 'JP': 82, 'JM': 116, 'BR': 17,
'BY': 64, 'BZ': 113, 'RU': 43, 'RS': 89, 'RO': 41, 'GW': 99, 'GT': 44, 'GR': 39, 'GQ': 73,
'GE': 78, 'GB': 9, 'GA': 45, 'GN': 98, 'GM': 110, 'GI': 96, 'GH': 88, 'OM': 100, 'HR': 67,
'HU': 106, 'HK': 34, 'HN': 22, 'AD': 35, 'PR': 40, 'PT': 26, 'PY': 51, 'PA': 60, 'PE': 20,
'PK': 84, 'PH': 91, 'PL': 30, 'EE': 52, 'EG': 74, 'ZA': 75, 'EC': 19, 'AL': 25, 'VN': 90,
'ET': 54, 'ZW': 114, 'ES': 0, 'MD': 68, 'UY': 77, 'MM': 94, 'ML': 104, 'US': 15, 'MT': 118,
'MR': 48, 'UA': 49, 'MX': 16, 'IL': 42, 'FR': 8, 'MA': 38, 'FI': 23, 'NI': 33, 'NL': 7,
'NO': 46, 'NG': 83, 'NZ': 93, 'CI': 57, 'CH': 3, 'CO': 21, 'CN': 28, 'CM': 55, 'CL': 4, 'CA': 2,
'CG': 101, 'CF': 109, 'CD': 112, 'CZ': 36, 'CR': 32, 'CU': 72, 'KE': 65, 'KH': 95, 'SV': 53,
'SK': 69, 'KR': 87, 'KW': 92, 'SN': 47, 'SL': 97, 'KZ': 111, 'SA': 56, 'SG': 66, 'SE': 24,
'DO': 11, 'DJ': 115, 'DK': 76, 'DE': 10, 'DZ': 80, 'MK': 105, -99: 1, 'LB': 81, 'TW': 29,
'TR': 70, 'TN': 85, 'LT': 103, 'LU': 59, 'TH': 79, 'TG': 86, 'LY': 108, 'AE': 37, 'VE': 14,
'IS': 107, 'IT': 18, 'AO': 71, 'AR': 13, 'AU': 63, 'AT': 6, 'IN': 31, 'IE': 5, 'QA': 58,
'MZ': 27},
    # Channel the customer used to join
'canal_entrada': {'013': 49, 'KHP': 160, 'KHQ': 157, 'KHR': 161, 'KHS': 162, 'KHK': 10, 'KHL': 0, 'KHM': 12,
'KHN': 21, 'KHO': 13, 'KHA': 22, 'KHC': 9, 'KHD': 2, 'KHE': 1, 'KHF': 19, '025': 159, 'KAC': 57,
'KAB': 28, 'KAA': 39, 'KAG': 26, 'KAF': 23, 'KAE': 30, 'KAD': 16, 'KAK': 51, 'KAJ': 41, 'KAI': 35,
'KAH': 31, 'KAO': 94, 'KAN': 110, 'KAM': 107, 'KAL': 74, 'KAS': 70, 'KAR': 32, 'KAQ': 37,
'KAP': 46, 'KAW': 76, 'KAV': 139, 'KAU': 142, 'KAT': 5, 'KAZ': 7, 'KAY': 54, 'KBJ': 133,
'KBH': 90, 'KBN': 122, 'KBO': 64, 'KBL': 88, 'KBM': 135, 'KBB': 131, 'KBF': 102, 'KBG': 17,
'KBD': 109, 'KBE': 119, 'KBZ': 67, 'KBX': 116, 'KBY': 111, 'KBR': 101, 'KBS': 118, 'KBP': 121,
'KBQ': 62, 'KBV': 100, 'KBW': 114, 'KBU': 55, 'KCE': 86, 'KCD': 85, 'KCG': 59, 'KCF': 105,
'KCA': 73, 'KCC': 29, 'KCB': 78, 'KCM': 82, 'KCL': 53, 'KCO': 104, 'KCN': 81, 'KCI': 65,
'KCH': 84, 'KCK': 52, 'KCJ': 156, 'KCU': 115, 'KCT': 112, 'KCV': 106, 'KCQ': 154, 'KCP': 129,
'KCS': 77, 'KCR': 153, 'KCX': 120, 'RED': 8, 'KDL': 158, 'KDM': 130, 'KDN': 151, 'KDO': 60,
'KDH': 14, 'KDI': 150, 'KDD': 113, 'KDE': 47, 'KDF': 127, 'KDG': 126, 'KDA': 63, 'KDB': 117,
'KDC': 75, 'KDX': 69, 'KDY': 61, 'KDZ': 99, 'KDT': 58, 'KDU': 79, 'KDV': 91, 'KDW': 132,
'KDP': 103, 'KDQ': 80, 'KDR': 56, 'KDS': 124, 'K00': 50, 'KEO': 96, 'KEN': 137, 'KEM': 155,
'KEL': 125, 'KEK': 145, 'KEJ': 95, 'KEI': 97, 'KEH': 15, 'KEG': 136, 'KEF': 128, 'KEE': 152,
'KED': 143, 'KEC': 66, 'KEB': 123, 'KEA': 89, 'KEZ': 108, 'KEY': 93, 'KEW': 98, 'KEV': 87,
'KEU': 72, 'KES': 68, 'KEQ': 138, -99: 6, 'KFV': 48, 'KFT': 92, 'KFU': 36, 'KFR': 144, 'KFS': 38,
'KFP': 40, 'KFF': 45, 'KFG': 27, 'KFD': 25, 'KFE': 148, 'KFB': 146, 'KFC': 4, 'KFA': 3, 'KFN': 42,
'KFL': 34, 'KFM': 141, 'KFJ': 33, 'KFK': 20, 'KFH': 140, 'KFI': 134, '007': 71, '004': 83,
'KGU': 149, 'KGW': 147, 'KGV': 43, 'KGY': 44, 'KGX': 24, 'KGC': 18, 'KGN': 11}
}
# 离散特征
cat_cols = list(mapping_dict.keys())
# 预测的列
target_cols = ['ind_ahor_fin_ult1', 'ind_aval_fin_ult1', 'ind_deco_fin_ult1', 'ind_deme_fin_ult1', 'ind_cco_fin_ult1',
'ind_cder_fin_ult1', 'ind_cno_fin_ult1',
'ind_ctju_fin_ult1', 'ind_ctma_fin_ult1', 'ind_ctop_fin_ult1', 'ind_ctpp_fin_ult1',
'ind_dela_fin_ult1', 'ind_ecue_fin_ult1', 'ind_fond_fin_ult1', 'ind_hip_fin_ult1',
'ind_plan_fin_ult1', 'ind_pres_fin_ult1', 'ind_reca_fin_ult1', 'ind_tjcr_fin_ult1', 'ind_valo_fin_ult1',
'ind_viv_fin_ult1', 'ind_nomina_ult1', 'ind_nom_pens_ult1', 'ind_recibo_ult1']
target_cols = target_cols[4:]
num_cols = ['age', 'renta', 'antiguedad']
feature_columns = cat_cols + num_cols
def clean_data(file_train="train_ver2.csv", file_test="test_ver2.csv"):
raw_file = pd.read_csv(data_dir + file_train, parse_dates=['fecha_dato', 'fecha_alta'], skipinitialspace=True,
dtype=str)
test_file = pd.read_csv(data_dir + file_test, parse_dates=['fecha_dato', 'fecha_alta'], skipinitialspace=True,
dtype=str)
# id
raw_file["ncodpers"] = raw_file["ncodpers"].astype(int)
test_file["ncodpers"] = test_file["ncodpers"].astype(int)
test_id = test_file["ncodpers"]
# time
raw_file.dropna(subset=['fecha_alta'], inplace=True)
raw_file['segmento'].fillna('01 - TOP', inplace=True)
test_file['segmento'].fillna('01 - TOP', inplace=True)
raw_file['ind_nomina_ult1'].fillna('0', inplace=True)
raw_file['ind_nom_pens_ult1'].fillna('0', inplace=True)
## 连续型变量
raw_file['age'] = raw_file['age'].astype(float)
raw_file['age'].fillna(raw_file['age'].mean(), inplace=True)
test_file['age'] = test_file['age'].astype(float)
test_file['age'].fillna(raw_file['age'].mean(), inplace=True)
raw_file['renta'] = raw_file['renta'].astype(float)
raw_file['renta'].fillna(raw_file['renta'].mean(), inplace=True)
test_file['renta'] = test_file['renta'].astype(float)
test_file['renta'].fillna(raw_file['renta'].mean(), inplace=True)
raw_file['antiguedad'] = raw_file['antiguedad'].astype(float)
raw_file['antiguedad'].fillna(raw_file['antiguedad'].mean(), inplace=True)
test_file['antiguedad'] = test_file['antiguedad'].astype(float)
test_file['antiguedad'].fillna(raw_file['antiguedad'].mean(), inplace=True)
# 类型转换
for tar in target_cols:
raw_file[tar] = raw_file[tar].astype(int)
# 离散特征映射
raw_file.fillna(-99, inplace=True)
test_file.fillna(-99, inplace=True)
for col in cat_cols:
raw_file[col] = raw_file[col].apply(lambda x: mapping_dict[col][x])
test_file[col] = test_file[col].apply(lambda x: mapping_dict[col][x])
date_train = pd.to_datetime(["2015-01-28", "2015-02-28", "2015-03-28", "2015-04-28", "2015-05-28"])
date_test = pd.to_datetime(["2016-01-28", "2016-02-28", "2016-03-28", "2016-04-28", "2016-05-28"])
data_train = raw_file.loc[raw_file["fecha_dato"].isin(date_train)]
data_test = raw_file.loc[raw_file["fecha_dato"].isin(date_test)]
train_file = raw_file.loc[raw_file["fecha_dato"] == pd.to_datetime("2015-06-28")]
data_train.to_csv(data_dir + 'data_train_clean.csv', index=False)
data_test.to_csv(data_dir + 'data_test_clean.csv', index=False)
test_file.to_csv(data_dir + 'test_file_clean.csv', index=False)
train_file.to_csv(data_dir + 'train_file_clean.csv', index=False)
def had_in_past(*args):
arr = np.array(args)
sum_col = arr.sum(axis=0)
already_had = np.ones(len(sum_col))
mask = np.where(sum_col < 1)
already_had[mask] = 0
return list(already_had)
def flattern(feature):
feature_flattern = []
for item in feature:
feature_flattern.extend(item)
return feature_flattern
def get_data_train(file_name, train_file):
train_X = []
train_y = []
user_dict = [{} for _ in range(6)]
target_len = len(target_cols)
with open(file_name) as f:
f_csv = csv.DictReader(f)
for row in f_csv:
# 用户特征
user_id = int(row['ncodpers'])
i = int(row["fecha_dato"][6]) % 6
if i != 0:
target_list = [int(float(row[target])) for target in target_cols]
user_dict[i][user_id] = target_list[:]
with open(train_file) as f:
f_csv = csv.DictReader(f)
for row in f_csv:
user_id = int(row['ncodpers'])
# 特征提取
X_feature = []
# 离散特征
X_feature.append([int(row[col]) for col in cat_cols])
# 连续特征
X_feature.append([int(float(row[col])) for col in num_cols])
X_feature = flattern(X_feature)
if row['fecha_dato'] == '2015-06-28':
user05 = user_dict[5].get(user_id, [0] * target_len)
user01 = user_dict[1].get(user_id, [0] * target_len)
user02 = user_dict[2].get(user_id, [0] * target_len)
user03 = user_dict[3].get(user_id, [0] * target_len)
user04 = user_dict[4].get(user_id, [0] * target_len)
already_had = had_in_past(user05, user04, user03, user02, user01)
user06 = [int(float(row[target])) for target in target_cols]
new_products = [max(x6 - x5, 0) for (x6, x5) in zip(user06, user05)]
# 仅6月份购买过商品的用户参与训练
if sum(new_products) > 0:
for ind, prod in enumerate(new_products):
if prod > 0:
# assert len(user05) == target_len
train_X.append(
X_feature + user05 + user04 + user02 + user01 + user03 + already_had)
train_y.append(ind)
return np.array(train_X, dtype=int), np.array(train_y, dtype=int)
def get_data_test(file_name, test_file):
test_X = []
user_dict = [{} for _ in range(6)]
user_05_had = []
target_len = len(target_cols)
with open(file_name) as f:
f_csv = csv.DictReader(f)
for row in f_csv:
# 用户特征
user_id = int(row['ncodpers'])
i = int(row["fecha_dato"][6]) % 6
if i != 0:
target_list = [int(float(row[target])) for target in target_cols]
user_dict[i][user_id] = target_list[:]
with open(test_file) as f:
f_csv = csv.DictReader(f)
for row in f_csv:
# 用户
user_id = int(row['ncodpers'])
# 特征提取
X_feature = []
# 离散特征
X_feature.append([int(row[col]) for col in cat_cols])
# 连续特征
X_feature.append([int(float(row[col])) for col in num_cols])
X_feature = flattern(X_feature)
if row['fecha_dato'] == '2016-06-28':
user05 = user_dict[5].get(user_id, [0] * target_len)
user01 = user_dict[1].get(user_id, [0] * target_len)
user02 = user_dict[2].get(user_id, [0] * target_len)
user03 = user_dict[3].get(user_id, [0] * target_len)
user04 = user_dict[4].get(user_id, [0] * target_len)
already_had = had_in_past(user05, user04, user03, user02, user01)
user_05_had.append(user05)
test_X.append(
X_feature + user05 + user04 + user02 + user01 + user03 + already_had)
return np.array(test_X, dtype=int), np.array(user_05_had, dtype=int)
def XGBModel(train_X, train_y, seed=2016):
param = {}
param['objective'] = 'multi:softprob'
param['eta'] = 0.1
param['max_depth'] = 9
param['silent'] = 1
param['num_class'] = 20
param['eval_metric'] = "mlogloss"
param['min_child_weight'] = 4
param['gamma'] = 3
param['subsample'] = 0.90
param['colsample_bytree'] = 0.9
param['seed'] = seed
num_rounds = 80
plst = list(param.items())
xgtrain = xgb.DMatrix(train_X, label=train_y)
model = xgb.train(plst, xgtrain, num_rounds)
return model
if __name__ == "__main__":
print("开始清洗数据...")
clean_data()
print("清洗数据完成, 获取训练集...")
train_X, train_y = get_data_train(data_dir + 'data_train_clean.csv', data_dir + 'train_file_clean.csv')
print("训练集大小:", train_X.shape)
print("获取预测数据...")
test_X, user_05_had = get_data_test(data_dir + 'data_test_clean.csv', data_dir + 'test_file_clean.csv')
print("预测集大小:", test_X.shape)
print("开始构建模型...")
model = XGBModel(train_X, train_y)
print("开始预测...")
xgtest = xgb.DMatrix(test_X)
preds = model.predict(xgtest)
print("得到预测结果...")
target_cols = np.array(target_cols)
# 取前7个值最大列
# 过滤掉5月份已经存在的商品
predictions = []
length = len(preds)
assert length == len(user_05_had)
for i in range(length):
already = np.argwhere(user_05_had[i] == 1).flatten()
if already.size != 0:
preds[i][already] = 0
pred = np.argsort(-preds[i])
flag = 7
for j in range(7):
if preds[i][pred[j]] == 0:
flag = j
break
predictions.append(pred[:flag])
# 取测试集ID
test_id = np.array(pd.read_csv(data_dir + "test_ver2.csv", usecols=['ncodpers'])['ncodpers'])
final_preds = [" ".join(list(target_cols[pred])) for pred in predictions]
result = pd.DataFrame({'ncodpers': test_id, 'added_products': final_preds})
result.to_csv('sub_xgb_12_19_3.csv', index=False)
| UTF-8 | Python | false | false | 16,080 | py | 43 | santander.py | 31 | 0.500933 | 0.42654 | 0 | 333 | 45.663664 | 120 |
openstack/freezer-tempest-plugin | 11,029,476,058,159 | b1feb1a2c27cac8e34e8ac0cad1598457193bea6 | 4db753584b6d4dc4f2e594a79b4bab65b5aba80e | /freezer_tempest_plugin/tests/freezer_api/api/test_api_jobs.py | 4edf5650239a116b386602b1ea4c32a1e4a075bd | [
"Apache-2.0"
]
| permissive | https://github.com/openstack/freezer-tempest-plugin | 03f5db6a8a5dcf79f8e1fcd57439e0decae5e4de | f8fe406aafb82878837706984b9a441a46aaf088 | refs/heads/master | 2023-09-03T22:11:35.113236 | 2023-02-14T01:58:32 | 2023-02-14T01:58:32 | 94,874,360 | 7 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from oslo_serialization import jsonutils as json
from freezer_tempest_plugin.tests.freezer_api.api import base
fake_job = {
"job_actions":
[
{
"freezer_action":
{
"action": "backup",
"mode": "fs",
"path_to_backup": "/home/tylerdurden/project_mayhem",
"backup_name": "project_mayhem_backup",
"container": "my_backup_container",
},
"exit_status": "success",
"max_retries": 1,
"max_retries_interval": 1,
"mandatory": True
}
],
"job_schedule":
{
"time_created": 1234,
"time_started": 1234,
"time_ended": 0,
"status": "stop",
"schedule_date": "2015-06-02T16:20:00",
"schedule_month": "1-6, 9-12",
"schedule_day": "mon, wed, fri",
"schedule_hour": "03",
"schedule_minute": "25",
},
"job_id": "blabla",
"client_id": "01b0f00a-4ce2-11e6-beb8-9e71128cae77_myhost.mydomain.mytld",
"user_id": "blabla",
"description": "scheduled one shot"
}
class TestFreezerApiJobs(base.BaseFreezerApiTest):
@classmethod
def resource_setup(cls):
super(TestFreezerApiJobs, cls).resource_setup()
@classmethod
def resource_cleanup(cls):
super(TestFreezerApiJobs, cls).resource_cleanup()
@decorators.attr(type="gate")
def test_api_jobs(self):
resp, response_body = self.freezer_api_client.get_jobs()
self.assertEqual(200, resp.status)
response_body_json = json.loads(response_body)
self.assertIn('jobs', response_body_json)
jobs = response_body_json['jobs']
self.assertEmpty(jobs)
@decorators.attr(type="gate")
def test_api_jobs_get_limit(self):
# limits > 0 should return successfully
for valid_limit in [2, 1]:
resp, body = self.freezer_api_client.get_jobs(limit=valid_limit)
self.assertEqual(200, resp.status)
# limits <= 0 should raise a bad request error
# for bad_limit in [0, -1, -2]:
# self.assertRaises(exceptions.BadRequest,
# self.freezer_api_client.get_jobs,
# limit=bad_limit)
@decorators.attr(type="gate")
def test_api_jobs_get_offset(self):
# offsets >= 0 should return 200
for valid_offset in [1, 0]:
resp, body = self.freezer_api_client.get_jobs(offset=valid_offset)
self.assertEqual(200, resp.status)
# offsets < 0 should return 400
# for bad_offset in [-1, -2]:
# self.assertRaises(exceptions.BadRequest,
# self.freezer_api_client.get_jobs,
# offset=bad_offset)
@decorators.attr(type="gate")
def test_api_jobs_post(self):
# Create the job with POST
resp, response_body = self.freezer_api_client.post_jobs(fake_job)
self.assertEqual(201, resp.status)
self.assertIn('job_id', response_body)
job_id = response_body['job_id']
# Check that the job has the correct values
resp, response_body = self.freezer_api_client.get_jobs(job_id)
self.assertEqual(200, resp.status)
# Delete the job
resp, response_body = self.freezer_api_client.delete_jobs(
job_id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_api_jobs_with_invalid_client_project_id_fail(self):
"""Ensure that a job submitted with a bad client_id project id fails"""
fake_bad_job = fake_job
fake_bad_job['client_id'] = 'bad%project$id_host.domain.tld'
# Create the job with POST
self.assertRaises(exceptions.BadRequest,
lambda: self.freezer_api_client.post_jobs(
fake_bad_job))
@decorators.attr(type="gate")
def test_api_jobs_with_invalid_client_host_fail(self):
"""Ensure that a job submitted with a bad client_id hostname fails"""
fake_bad_job = fake_job
fake_bad_job['client_id'] = ("01b0f00a-4ce2-11e6-beb8-9e71128cae77"
"_bad_hostname.bad/domain.b")
# Create the job with POST
self.assertRaises(exceptions.BadRequest,
lambda: self.freezer_api_client.post_jobs(
fake_bad_job))
def test_api_jobs_with_only_fqdn_succeeds(self):
"""Ensure that a job submitted with only an FQDN succeeds"""
fqdn_only_job = fake_job
fqdn_only_job['client_id'] = 'padawan-ccp-c1-m1-mgmt'
# Attempt to post the job, should succeed
resp, response_body = self.freezer_api_client.post_jobs(fqdn_only_job)
self.assertEqual(201, resp.status)
| UTF-8 | Python | false | false | 5,685 | py | 27 | test_api_jobs.py | 19 | 0.585576 | 0.564116 | 0 | 154 | 35.915584 | 79 |
sztojfen/Euler | 10,453,950,402,596 | bd1d82fc8f6de0f385a5cdb335df40c550d722c6 | 09af8ebe9c02362f797dd9c4b132e68ba51f4860 | /15.py | 567b4f497a376588f7ab0b8cea6a87c76032664c | []
| no_license | https://github.com/sztojfen/Euler | 5471f2fa13c32bddea1efb9526ab51c2d2489a03 | 9f36dc8d91ca0590ac48ed83583720f551ec3eca | refs/heads/master | 2021-06-03T09:16:34.163012 | 2020-12-03T14:02:38 | 2020-12-03T14:02:38 | 145,708,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'stefan'
def silnia(n):
if n==1:
return 1
else:
return n*silnia(n-1)
print silnia(40)/(silnia(20)*silnia(20))
| UTF-8 | Python | false | false | 151 | py | 47 | 15.py | 47 | 0.536424 | 0.476821 | 0 | 9 | 15.555556 | 40 |
aynella/lab22 | 6,889,127,544,284 | e5a7f5c5495040e516822eeca4589be8f0e9f8ae | 8289e5c60547fba8ab56542f2f9a5e5bf1437e9d | /ex1.py | 422adfa7ccb83547ed9162dfa5a90342f66d29b9 | []
| no_license | https://github.com/aynella/lab22 | ddc85832904d30021a6faa1cb80fd548071a744c | b1b5e794d1608777270c9e9f7c76a31837bc7d61 | refs/heads/master | 2021-01-06T11:29:56.112877 | 2020-02-18T08:47:24 | 2020-02-18T08:47:24 | 241,311,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def ex_1(n, i):
return n[i]
print(ex_1([10, 3, 4, 5, 9], 3))
print(ex_1([10, 3, 4, 5, 9], 4))
| UTF-8 | Python | false | false | 98 | py | 14 | ex1.py | 14 | 0.469388 | 0.295918 | 0 | 4 | 23.5 | 32 |
ssgalitsky/pymm | 5,557,687,719,936 | e3afb3d62472a10313ec950ae812619696aebd64 | ac81bb2c70a68dbc3e0403996e88def41cd9d11d | /setup.py | 30b5094c630aa09ecb658f467a05453c5e803173 | [
"MIT"
]
| permissive | https://github.com/ssgalitsky/pymm | 1596ec6c15d399211e45453c5491a93178d5674f | 77f0cfc32a6819781cf61e3c311f1340406e3fba | refs/heads/master | 2020-06-04T06:57:14.761501 | 2017-02-17T21:37:10 | 2017-02-17T21:37:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from distutils.core import setup
# thanks to http://www.diveintopython3.net/packaging.html
# for the excellent tutorial on packaging python modules
classifiers = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: OS Independent',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: XML' # interpret xml-based mindmaps
]
descr = (
'python module to read / write Mindmap (.mm) files built with',
'Freemind and Freeplane',
)
long_descr = """\
Module for reading, writing, editting, and creating Mind Maps.
----------------------------------------------------------------------------
This module builds on top of xml.etree.ElementTree to intrepret the xml
structure of MindMaps, and presents the information in a clear and intuitive
manner that mimicks how Freeplane and Freemind build their own.
Building a mindmap is easy::
import pymm
with pymm.Mindmap('output.mm', 'w') as mm:
root = mm.root
root.cloud = pymm.Cloud(SHAPE='STAR')
root.text = 'space topics'
for text in ['stars', 'planets', 'astroids']:
root.children.append(pymm.Node(TEXT=text))
# on context-exit, mindmap is written to file
Reading, editting, and writing a mindmap is also easy::
import pymm
mm = pymm.Mindmap('docs/pymm_documentation.mm')
root = mm.nodes[0]
root.nodes.append(pymm.Node(TEXT='another child of root'))
pymm.write('output2.mm', mm)
"""
setup(
name='pymm',
packages=['pymm'],
version='0.3.5',
author='Lance Kindle',
author_email='lance.kindle@gmail.com',
url='http://www.github.com/lancekindle/pymm',
classifiers=classifiers,
description=descr,
long_description=long_descr
)
| UTF-8 | Python | false | false | 1,974 | py | 16 | setup.py | 10 | 0.650456 | 0.645897 | 0 | 52 | 36.961538 | 79 |
Julien-Kinderf/word-blitz-solver | 6,700,149,007,520 | 5855309cd9f803b6c1dd4d5f9f0dac4bed9efa71 | 9b195cf355d4bba73feb6867d9eab1494933f5f4 | /src/solver.py | c769ea7d26c818d69ddcc5d28ba0c51a722e7da4 | []
| no_license | https://github.com/Julien-Kinderf/word-blitz-solver | e77fef6b1703638085a1b79f573284e74cadb992 | 4dcdf90de8fc3287822dbf7dc97b64931f42f1d7 | refs/heads/master | 2022-04-16T03:16:07.018372 | 2020-04-09T20:20:03 | 2020-04-09T20:20:03 | 250,363,984 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
"""
This script
"""
import sys
import os
import time
import ocr
def neighboors(case):
"""
Returns the list of neighboors of the input node, following our grid convention
"""
# Switch case made with a dictionnary
n = {0: [1, 4, 5],
1: [0, 2, 5, 6, 4],
2: [1, 3, 6, 7, 5],
3: [2, 7, 6],
4: [0, 1, 5, 9, 8],
5: [4, 6, 1, 9, 0, 10, 2, 8],
6: [5, 7, 2, 10, 1, 11, 3, 9],
7: [2, 3, 6, 10, 11],
8: [4, 5, 9, 12, 13],
9: [8, 10, 5, 13, 4, 14, 6, 12],
10: [9, 11, 6, 14, 5, 15, 7, 13],
11: [6, 7, 10, 14, 15],
12: [13, 8, 9],
13: [12, 14, 9, 8, 10],
14: [13, 15, 10, 9, 11],
15: [14, 11, 10, ]}
return(n[case])
def getstring(chemin, grille):
"""Renvoie la chaine de caractères correspondant au chemin fourni en entrée"""
s = "".join([grille[i] for i in chemin])
return(s)
def cleandict(path, grid):
"""Nettoie le dictionnaire en retirant tous les mots non faisables"""
with open(path, "r+") as textfile:
text = textfile.read().split()
dictionnaire = []
startsize = len(text)
for mot in text:
if not ((len(mot) == 1) or (len(mot) >= 17) or ("-" in mot) or not (set(mot) <= set(grid))):
dictionnaire.append(mot)
endsize = len(dictionnaire)
return(dictionnaire)
def solve(grid, path_to_dict):
"""
@parameter : a grid of letters
@output : a dictionnary : keys are possible words and elements are the necessary moves
"""
motstrouves = {}
# Récupération du dictionnaire
dictionnaire = cleandict(path_to_dict, grid)
# Initialisation des chemins de base :
# (ce sont juste des numéros de cases)
chemins = []
for i in range(16):
chemins.append([i])
# On évalue tour à tour chaque chemin
while len(chemins) > 0:
chemin = chemins[0]
# print(f"Etude du chemin {chemin}")
# Pour chacun de ces chemins on regarde l'ensemble des voisins de sa dernière case
# Naturellement, on en exclut les voisins qui sont déjà dans le chemin
voisins = [v for v in neighboors(chemin[-1]) if v not in chemin]
# On observe à présent chacun de ces voisins :
for voisin in voisins:
found = False
# print(f"\tEtude du voisin {voisin} :")
# On regarde le nouveau chemin potentiel et la chaine associée
newchemin = chemin[:]
newchemin.append(voisin)
# ex: [0, 1] devient [0, 1, 2]
newchaine = getstring(newchemin, grid)
# print(f"\t\tchemin {newchemin} pour chaine {newchaine} : ", end="")
# On compare à ce qu'on a dans le dictionnaire :
# Si il y a un match c'est chouette
if ((newchaine not in motstrouves.keys()) and (newchaine in dictionnaire)):
chemins.append(newchemin)
found = True
motstrouves[newchaine] = newchemin
# print("trouvé dans le dictionnaire")
continue # Puisque la chaine est un mot il y a des bonnes chances qu'il existe aussi d'autres chaines qui commencent par ce mot
# On peut quand même espérer un match si des mots commencent par cette chaine
for mot in dictionnaire:
if (mot.startswith(newchaine)):
chemins.append(newchemin)
found = True
# print(f"Début de la chaine {mot}")
break # Il suffit de trouver un mot qui commence par cette chaine pour que ça vaille le coup de la garder
if not found:
# print("pas dans le dictionnaire")
pass
# Après l'avoir analysé on supprime le chemin
chemins.remove(chemin)
return(motstrouves)
"""
solution = solve(grid)
for mot in solution.keys():
print(f"{mot} : {solution[mot]}")
print(f"Temps d'execution : {time.time() - start_time} secondes")
"""
if __name__ == "__main__":
start_time = time.time()
os.system('clear')
if (len(sys.argv) != 2):
print(f"Usage : python3 solver.py <path to your image>")
exit(0)
# Retrieving the input file
relative_path_to_image = sys.argv[1]
if (os.path.exists(relative_path_to_image)):
absolute_path_to_image = os.path.abspath(relative_path_to_image)
else:
print(f"Cannot find {relative_path_to_image}")
exit(1)
# Retrieving the identified letter folder path
absolute_path_to_letters = __file__.replace(
"/src/solver.py", "/img/identified/")
# Retrieving the identified letter folder path
absolute_path_to_exec = __file__.replace(
"/solver.py", "/executioner.sh")
# Retrieving the path for the used dictionnary
absolute_path_to_dictionnary = __file__.replace(
"/src/solver.py", "/words/dictionnary.txt")
# getting the grid
grid = ocr.getGrid(absolute_path_to_image, absolute_path_to_letters)
print(
f"Detected the following grid in {round(time.time() - start_time, 2)} seconds :\n{' '.join(grid[0:4])}\n{' '.join(grid[4:8])}\n{' '.join(grid[8:12])}\n{' '.join(grid[12:16])}\n")
# Beginning of the word research phase
print("Starting the word research ...")
research_start_time = time.time()
solution = solve(grid, absolute_path_to_dictionnary)
research_end_time = time.time()
print(
f"Found {len(solution)} possible words in {round(research_end_time - research_start_time, 2)} seconds :")
for word in solution.keys():
print(f"{word} ({len(word)}): {solution[word]}")
print(
f"Found {len(solution)} words in {round(time.time() - start_time, 2)} seconds")
# exit() # What follows is not ready yet
print()
typing_start = time.time()
print(f"Starting to send words to the phone :")
nbwords = 0
for word in sorted(solution.keys(), reverse=True, key=lambda w: len(w)):
print(f"Entering word {word}")
pattern = ' '.join([str(i) for i in solution[word]])
os.system(f"bash {absolute_path_to_exec} {pattern}")
nbwords += 1
if ((time.time() - start_time) > 40): # Put this value to 78 to play all time
break
print(
f"Entered {nbwords} words in {round(time.time() - typing_start, 2)} seconds")
| UTF-8 | Python | false | false | 6,444 | py | 7 | solver.py | 5 | 0.577145 | 0.549899 | 0 | 189 | 32.984127 | 186 |
BhagyeshDudhediya/PythonPrograms | 16,569,983,830,510 | 4df0cac6d76dda3a1af63e399d508cec7159e781 | deea011bef7b656eb579a9769d07f14478a32690 | /18-loop-cntl-statements.py | f64b5167af240ae2308968153e4842076985700f | []
| no_license | https://github.com/BhagyeshDudhediya/PythonPrograms | 38abcfd94e21b7cc8c323760a313cf3930bb07b2 | 185152b7644462d9dc75a1c91aedf2cf13da5015 | refs/heads/master | 2021-04-30T15:09:54.311064 | 2018-10-31T09:50:57 | 2018-10-31T09:50:57 | 121,232,711 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# The Loop control statements change the execution from its normal sequence. When the execution leaves a scope,
# all automatic objects that were created in that scope are destroyed.
# There are 3 loop control statements:
# 1. break, 2. continue, 3. pass
# BREAK STATEMENT
# The break statement is used for premature termination of the current loop. After abandoning the loop,
# execution at the next statement is resumed, just like the traditional break statement in C.
# The most common use of break is when some external condition is triggered requiring a hasty exit from a loop.
# The break statement can be used in both while and for loops.
# If you are using nested loops, the break statement stops the execution of the innermost loop and
# starts executing the next line of the code after the block.
print ('Break Statement:');
my_num=int(input('any number: '))
numbers=[11,33,55,39,55,75,37,21,23,41,13]
print ('list', numbers);
for num in numbers:
if num==my_num:
print ('number',my_num,'found in list')
break
else:
print ('number',my_num,'not found in list')
# CONTINUE STATEMENT
# The continue statement in Python returns the control to the beginning of the current loop.
# When encountered, the loop starts next iteration without executing the remaining statements in the current iteration.
# The continue statement can be used in both while and for loops.
print ('\nContinue Statement:');
var = 10 # Second Example
while var > 0:
var = var - 1;
if var == 5:
print ("var == 5, so continue..")
continue
print ('Current variable value :', var)
# PASS STATEMENT
# It is used when a statement is required syntactically but you do not want any command or code to execute.
# The pass statement is a null operation; nothing happens when it executes. The pass statement is also
# useful in places where your code will eventually go, but has not been written yet i.e. in stubs).
print ('\nPass Statement')
for letter in 'Python':
if letter == 'h':
pass
print ('This is pass block')
print ('Current Letter :', letter)
| UTF-8 | Python | false | false | 2,145 | py | 55 | 18-loop-cntl-statements.py | 53 | 0.715152 | 0.699767 | 0 | 48 | 43.583333 | 119 |
tfmorris/wrangler | 17,403,207,492,564 | 12c5eb3299d16a40d256bef5ea583d2df0235951 | fa96af5a7b13ef0c79c134f0f38e0c8beb2ad4ac | /runtime/python/setup.py | e702f0502cbad601b0e56cec8a3eae338e062094 | []
| no_license | https://github.com/tfmorris/wrangler | 0eda4610207128047187be427d53d90958860021 | 0ae804edd84cb8040e1c93cc42222e0a41a4e94a | refs/heads/master | 2021-01-24T00:24:50.076635 | 2013-06-18T20:55:05 | 2013-06-18T20:55:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup, find_packages
setup(
name = "DataWrangler",
version = "0.2",
packages = find_packages(),
url = 'http://www.stanford.edu/~skandel/platform/python/DataWrangler-0.1.tar.gz',
download_url = 'http://www.stanford.edu/~skandel/platform/python/DataWrangler-0.1.tar.gz',
) | UTF-8 | Python | false | false | 307 | py | 45 | setup.py | 27 | 0.70684 | 0.687296 | 0 | 8 | 37.5 | 91 |
Jiongxiao/Leetcode | 11,020,886,095,455 | 37aae13cee1e1b21016556550145d08b04f9069f | 4c67533d6d5183ed985288a55631fe1c99b5ae21 | /413_Arithmetic_Slices.py | 36793499a7ff658d7a1d929d67303960379edaa1 | []
| no_license | https://github.com/Jiongxiao/Leetcode | 546f789a0d892fe56d7f53a41aa97ccb2a8e1813 | 641775f750a1197f9aaa23e5122b0add2ae064ee | refs/heads/master | 2021-01-17T01:11:35.970423 | 2017-09-21T07:04:52 | 2017-09-21T07:04:52 | 56,422,836 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def numberOfArithmeticSlices(self, A):
"""
:type A: List[int]
:rtype: int
"""
n=len(A)
if n<3:
return 0
res=0
tail=[0]*n
if A[2]+A[0]==A[1]<<1:
tail[2]=1
res+=1
for i in range(3,n):
if A[i]+A[i-2]==A[i-1]<<1:
tail[i]=tail[i-1]+1
res+=tail[i]
return res
| UTF-8 | Python | false | false | 450 | py | 128 | 413_Arithmetic_Slices.py | 128 | 0.375556 | 0.337778 | 0 | 20 | 21.5 | 42 |
leftomelas/funny-morse | 14,078,902,827,192 | 5c67387916db086ba3198bdb25fadefb9379c0e8 | fb82083e434c42ed6220a866183488aa8a7807cf | /funny_morse/show.py | e37ec4d4b824397c4bf0c1685282299851b2f77a | [
"MIT"
]
| permissive | https://github.com/leftomelas/funny-morse | 6d4c7b35fb4f1b7f588c499cb61e9cab61e485ce | c50c9165ff739bd6da45cdc486e597742d8a1fea | refs/heads/master | 2023-08-11T15:46:40.680829 | 2020-07-15T08:10:44 | 2020-07-15T08:10:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter
import morse
TITLE = "funny-morse"
GEOMETRY = "" # Default
WPM = morse.WPM
FS = morse.FS
def info(title=-1, geometry=-1):
title = TITLE if title == -1 else title
geometry = GEOMETRY if geometry == -1 else geometry
print("Window :")
print(" Title =", title)
print(" Geometry =", repr(geometry))
def main(message, wpm=-1, fs=-1, title=-1, geometry=-1):
wpm = WPM if wpm == -1 else wpm
fs = FS if fs == -1 else fs
title = TITLE if title == -1 else title
geometry = GEOMETRY if geometry == -1 else geometry
code = morse.stringToMorse(message)
flip_intervals = morse.morseToFlipIntervals(code, wpm, fs)
window = Window(flip_intervals, title, geometry)
window.mainloop()
class Window(tkinter.Tk):
def __init__(self, flip_intervals, title=-1, geometry=-1):
title = TITLE if title == -1 else title
geometry = GEOMETRY if geometry == -1 else geometry
self.flip_intervals = flip_intervals
self.end = len(flip_intervals)
self.i = 0
self.previous = False
super().__init__()
self.title(title)
self.geometry(geometry)
self.configure(bg="#000000")
self.update()
def flip(self):
self.configure(bg="#000000" if self.previous else "#ffffff")
self.previous = not self.previous
self.i += 1
self.update()
def update(self):
if self.i >= self.end:
self.destroy()
else:
sleep = self.flip_intervals[self.i]
self.after(sleep, self.flip)
| UTF-8 | Python | false | false | 1,597 | py | 10 | show.py | 8 | 0.589856 | 0.571071 | 0 | 56 | 27.517857 | 68 |
avvarga/Lab206 | 12,910,671,731,985 | bb0f7a0e59fc30032433c0742202386778cc65cf | 9ee644fdb3b0a5ae7887ecb1f4a9013b3abbcb8b | /Python/OOP/car.py | 17104afd7deeb4c779ff70c423641f088128c5fc | []
| no_license | https://github.com/avvarga/Lab206 | 0aa0bf7f1e0340466c5e120e7c1a8726f9385567 | c55fae4044625355ec392f3de58442b2767f7916 | refs/heads/master | 2021-04-15T15:42:11.700351 | 2018-04-13T23:07:25 | 2018-04-13T23:07:25 | 126,499,170 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class car(object):
def __init__ (self,price,speed,fuel,mileage):
self.price=price
self.speed=speed
self.fuel=fuel
self.mileage=mileage
if price>10000:
self.tax= 0.15
else:
self.tax= 0.12
print self.display_all()
def display_all(self):
print "Price:",self.price
print "Speed:",self.speed,"mph"
print "Fuel:",self.fuel
print "Mileage:",self.mileage,"mpg"
print "Tax:",self.tax
# return self
car1= car(2000,35,"Full",15)
car2= car(2000,5,"Not Full",105)
car3= car(2000,15,"Kind of Full",95)
car4= car(2000,25,"Full",25)
car5= car(2000,45,"Empty",25)
car6= car(2000000,35,"Empty",15) | UTF-8 | Python | false | false | 725 | py | 124 | car.py | 76 | 0.569655 | 0.475862 | 0 | 34 | 20.352941 | 49 |
romonzaman/piphone | 13,417,477,860,831 | 8dacd17c3195da5ab5fd83175d402f0b9cb18291 | 75d270d2a1f52a5977707f012dc6ff460e4689f5 | /oled_test.py | 7c7fd502751b953a8d9ac28e58e3a1a1c38a792e | []
| no_license | https://github.com/romonzaman/piphone | b8570dd6ce7ddfdbccd81f035085994c685d99f0 | 398098813f9aa609f4009f9dc35a956e93cfee4d | refs/heads/main | 2023-08-17T06:30:57.047861 | 2021-09-12T03:41:49 | 2021-09-12T03:41:49 | 405,401,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import board
import busio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
import time
i2c = busio.I2C(board.SCL, board.SDA)
oled = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c)
def oled_white():
oled.fill(250)
oled.show()
def oled_black():
oled.fill(0)
oled.show()
oled_black()
oled_white()
time.sleep(2)
oled_black()
time.sleep(1)
oled_white()
time.sleep(2)
oled_black()
time.sleep(1)
oled_white()
time.sleep(2)
oled_black()
time.sleep(1)
oled_white()
time.sleep(2)
oled_black()
time.sleep(1)
oled_white()
| UTF-8 | Python | false | false | 548 | py | 3 | oled_test.py | 2 | 0.698905 | 0.638686 | 0 | 39 | 13.051282 | 49 |
jpgil/logdelay | 14,894,946,588,601 | ca5130b09cca4c724ab1557d0fd089f8f53ec49c | 5ce28e39b5971e9c80b0c03456f304691effc847 | /src/helpers.py | 68ab687fe6ecef72d2a4b2aa3f42eb83190c7a6a | [
"Apache-2.0"
]
| permissive | https://github.com/jpgil/logdelay | 0aefbf9efdbebc9cb028cbe7fc1d3ba27e7af029 | 788e9402d63cecc732822ac49a39dd961b63219f | refs/heads/master | 2022-09-09T20:24:55.507441 | 2021-06-02T07:41:33 | 2021-06-02T07:41:33 | 228,706,066 | 0 | 0 | Apache-2.0 | false | 2022-08-10T18:58:07 | 2019-12-17T21:38:21 | 2021-06-02T07:41:42 | 2022-08-10T18:58:06 | 13,961 | 0 | 0 | 6 | Jupyter Notebook | false | false | from networkx.drawing.nx_agraph import write_dot
from networkx.drawing.nx_agraph import to_agraph
from IPython.display import Image
import pygraphviz as pgv
def graph(G, color="#cccccc", filename="/tmp/simple.png"):
for u, v in G.edges:
if "weight" in G[u][v]:
G[u][v]["label"] = G[u][v]["weight"]
G.graph['graph']={'rankdir':'TD'}
G.graph['node']={'shape':'circle'}
G.graph['edges']={'arrowsize':'1.0'}
A = to_agraph(G)
A.layout('dot')
A.draw(filename)
display(Image(filename))
def count_successor_pairs( T ):
logger.debug("Extracting pairs for %d elements in T=%s" % ( len(T), T[:20]) )
pairs = []
partial_subtrace = T[:]
while len(partial_subtrace):
s_i = partial_subtrace.pop(0)
pairs += [ (s_i, s_i) ] + [ (s_i, s_j) for s_j in partial_subtrace ]
logger.debug("Pairs for found = %d" % ( len(pairs) ) )
return pairs | UTF-8 | Python | false | false | 1,011 | py | 55 | helpers.py | 14 | 0.541048 | 0.536103 | 0 | 34 | 28.764706 | 84 |
roniwinik/Drivers | 9,766,755,642,504 | b68eab1631b483e8e69cacf6e406dfc0e77e2778 | b7de4effae566d88add33a9cd9656283167c960c | /Noise_Generator/Noise_Generator.py | a7b9ce29425325123e5a466d5e3622c7eff9e974 | [
"MIT"
]
| permissive | https://github.com/roniwinik/Drivers | 3212122ffe52aa801223bbf2ed0f4f2ad047c828 | ba473bc21d1b5321da1e6caadec5b4d624282edc | refs/heads/master | 2021-06-25T06:54:26.563656 | 2020-03-23T21:45:13 | 2020-03-23T21:45:13 | 134,781,611 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pickle
import InstrumentDriver
import numpy as np
import ast
import sys, os
from scipy.fftpack import fft, ifft
import scipy.io as sio
def loadData(file_path):
"""
Load a log file. (Use the built-in pickle module)
Parameters
----------
file_path: str
path of the log file
Returns
-------
- data: arbitrary object
arbitrary Python object which contains data
"""
import pickle
with open(file_path, 'rb') as _input:
data = pickle.load(_input)
return data
def white(w, S0=1.):
return S0 * np.ones_like(w)
def lorentz(w, S0=1., wc=1., w0=0.):
""" Lorentzian spectrum. """
return S0/(2*np.pi*wc)*(1./(1+((w-w0)/wc)**2) + 1./(1+((w+w0)/wc)**2))
def genProcess(times, spectrum, T0):
""" Generate one realization of a Gaussian random process. """
# times: numpy array containing all the times for which to generate the process
# spectrum: numpy array containing the spectrum at all the harmonics omega_k = 2*pi*k/T0
# T0: period of the noise process. Has to be much longer than the duration of an
# experimental run
# returns: numpy array containing all the values of the random process for the times
# specified in input
# Check that T0 is longer than the largest specified time
if (T0 < max(times)):
print("WARNING: T0 should be the longest timescale.")
# Number of frequencies and times
nfreqs = max(np.shape(spectrum))
ntimes = max(np.shape(times))
# Generation of harmonics and random Fourier coefficients
vec_omega = 2*np.pi/T0 * np.arange(1,nfreqs+1)
vec_sigma = np.sqrt(2.*spectrum/T0)
vec_a = vec_sigma * np.random.normal(size=nfreqs)
vec_b = vec_sigma * np.random.normal(size=nfreqs)
# Matrix with times in rows
mat_times = np.array(np.repeat(times[np.newaxis],nfreqs,0))
# Matrix with frequencies in columns
mat_freqs = np.array(np.repeat(np.transpose(vec_omega[np.newaxis]),ntimes,1))
# Sum up the Fourier series
mat_cos = np.cos(mat_freqs * mat_times);
cos_term = np.dot(np.reshape(vec_a,(1,nfreqs)), mat_cos)
mat_sin = np.sin(mat_freqs * mat_times);
sin_term = np.dot(np.reshape(vec_b,(1,nfreqs)), mat_sin)
return (cos_term + sin_term)[0]
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements a Single-qubit pulse generator"""
def performOpen(self, options={}):
self.vEnvelope = np.array([], dtype=float)
self.vNoise_Time = np.array([], dtype=float)
self.vNoise_Time_Modulated = np.array([], dtype=float)
self.vNoise_Freq = np.array([], dtype=float)
self.vNoise_Freq_FFT = np.array([], dtype=float)
self.vTime = np.array([], dtype=float)
self.vFreq = np.array([], dtype=float)
self.vFreq_AWG_frame = np.array([], dtype=float)
self.vBinEdges = np.array([], dtype=float)
self.vHistogram = np.array([], dtype=float)
self.PrevNoiseIndex = -9.99e-9
self.saved_data = None
self.original_S0 = 0
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# do nothing, just return value
if quant.name == 'Read file':
self.Readfile()
elif quant.name == 'Read file when start':
if (value == True):
self.Readfile()
return value
def Readfile(self):
# self.log('Read File')
file_path = self.getValue("File path")
self.saved_data = loadData(file_path)
self.setValue("T0", self.saved_data["T0"])
self.setValue("High Cutoff Freq.", self.saved_data["High Cutoff Freq."])
self.original_S0 = self.saved_data["S0"]
self.setValue("Noise Power", self.saved_data["S0"])
self.setValue("Center Freq.", self.saved_data["Center Freq."])
self.setValue("HWHM", self.saved_data["HWHM"])
self.reportStatus('Successfully Read Data File!')
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
# check type of quantity
if quant.isVector():
noise_indx = self.getValue("Noise Index")
self.calculateNoise()
# if self.PrevNoiseIndex != noise_indx:
# self.calculateNoise()
# self.PrevNoiseIndex = noise_indx
# traces, check if waveform needs to be re-calculated
if self.isConfigUpdated():
self.calculateWaveform()
# if ((quant.name == 'Trace - Modulated Noise (Time-Domain)') & (len(self.vEnvelope) != len(self.vNoise_Time))):
# self.calculateNoise()
# get correct data and return as trace dict
vData = self.getWaveformFromMemory(quant)
length = len(vData)
if (self.getValue("Turn Off Noise") == True):
vData = np.zeros(length)
if quant.name == 'Trace - Noise (Histogram)':
value = quant.getTraceDict(vData, x=self.vBinEdges)
elif quant.name == 'Trace - Noise (Freq-Domain, FFT)':
value = quant.getTraceDict(vData, x=self.vFreq_AWG_frame)
elif quant.name == 'Trace - Noise (Freq-Domain, Original)':
value = quant.getTraceDict(vData, x=self.vFreq)
# elif quant.name == 'Trace - Envelope':
# dt = 1/self.getValue('Sample rate')
# value = quant.getTraceDict(self.vEnvelope, dt = dt)
else:
dt = 1/self.getValue('Sample rate')
value = quant.getTraceDict(vData, dt=dt)
else:
# for all other cases, do nothing
value = quant.getValue()
return value
def getWaveformFromMemory(self, quant):
dTrace = { 'Trace - Noise (Time-Domain)': self.vNoise_Time,
'Trace - Noise (Freq-Domain, Original)': self.vNoise_Freq,
# 'Trace - Noise (Time-Domain, RAW)': self.vNoise_Time_RAW,
'Trace - Noise (Freq-Domain, FFT)': self.vNoise_Freq_FFT,
'Trace - Noise (Histogram)': self.vHistogram,
'Trace - Envelope': self.vEnvelope,
'Trace - Modulated Noise (Time-Domain)': self.vNoise_Time * self.vEnvelope if (len(self.vNoise_Time) == len(self.vEnvelope)) else []
# 'Trace - Noise (Time-Domain, Histogram, RAW)': self.vTime_Histogram_RAW,
}
vData = dTrace[quant.name]
return vData
def generateNoise(self):
"""Get the waveform of a given noise"""
# get params
nPoints = float(self.getValue('Number of points'))
dSampleRate = self.getValue('Sample rate')
str_NoiseType = self.getValue('Noise type')
index = int(self.getValue('Noise Index'))
use_interpolation = self.getValue('Use Interpolation')
if (str_NoiseType == 'Custom'):
vec_time = np.linspace(0, (nPoints-1)*1/dSampleRate,nPoints)
vec_freq_AWG_frame = np.linspace(0.0, 1.0/(2.0/dSampleRate), nPoints*0.5)
if (self.saved_data is None):
self.reportStatus('No Data Available')
return
vec_T_noise = np.zeros_like(vec_time)
index = np.clip(index, 0, len(self.saved_data['mat_T_noise'])-1)
self.log('shape of mat_T_noise: ' + str(self.saved_data['mat_T_noise'].shape))
vec_T_noise_data = np.copy(self.saved_data['mat_T_noise'][index]) * np.sqrt(self.getValue('Noise Power')/self.original_S0)
self.log( self.saved_data['mat_T_noise'][index])
if (len(vec_T_noise_data) >= len(vec_time)):
vec_T_noise = vec_T_noise_data[:len(vec_time)]
else:
N_copy = int(len(vec_time) / len(vec_T_noise_data))
for i in range(N_copy):
vec_T_noise[i*len(vec_T_noise_data):(i+1)*len(vec_T_noise_data)] = vec_T_noise_data
vec_T_noise[(i+1)*len(vec_T_noise_data):] = vec_T_noise_data[0:len(vec_time)-(i+1)*len(vec_T_noise_data)]
vec_F_noise = self.saved_data['vec_F_noise']
vec_freq = self.saved_data['vec_freq']
else:
np.random.seed(int(index))
T0 = self.getValue('T0')
dHighCutoffFreq = self.getValue('High Cutoff Freq.')
if use_interpolation:
interval = self.getValue('Interpolation Interval')
dSampleRate = int(dSampleRate / interval)
nPoints = int(nPoints / self.getValue('Sample rate') * dSampleRate)
# self.log(T0, dSampleRate, nPoints)
vec_time = np.linspace(0, (nPoints-1)*1/dSampleRate,nPoints)
vec_freq_AWG_frame = np.linspace(0.0, 1.0/(2.0/dSampleRate), nPoints*0.5)
S0 = self.getValue('S0')
freq_step = 1/T0
N_freq = int(dHighCutoffFreq / freq_step)
vec_freq = np.linspace(freq_step, dHighCutoffFreq, N_freq)
if str_NoiseType == 'White':
"""Generate Gaussian Noise Signal(mean = 0, sigma = sqrt(Power Spectral Density))"""
vec_F_noise = white(2 * np.pi * vec_freq, S0= S0)
vec_T_noise = genProcess(vec_time, vec_F_noise, T0)
elif str_NoiseType == 'Squared-Gaussian':
"""Generate Squared Gaussian Noise Signal"""
vec_F_noise = white(2 * np.pi * vec_freq, S0= S0)
vec_T_noise = genProcess(vec_time, vec_F_noise, T0) **2
elif str_NoiseType == 'Lorentzian':
f0 = self.getValue('Center Freq.')
fwidth = self.getValue('HWHM')
vec_F_noise = lorentz(2 * np.pi * vec_freq , S0= S0, wc = 2 * np.pi * fwidth , w0 = 2 * np.pi * f0)
vec_T_noise = genProcess(vec_time, vec_F_noise, T0)
#Filter noise in Time-Window
dNoiseStart = self.getValue('Noise Start Time')
dNoiseEnd = self.getValue('Noise End Time')
index_Empty = np.where((vec_time < dNoiseStart) | (vec_time > dNoiseEnd))
vec_T_noise[index_Empty] = 0
bln_hist_FFT = self.getValue('Generate Histogram and FFT')
d_bincounts = self.getValue('Histogram Bin Counts')
vec_F_noise_FFT = []
vec_histogram = []
vec_binedges = []
if (use_interpolation == True):
nPoints = float(self.getValue('Number of points'))
dSampleRate = self.getValue('Sample rate')
vec_time_interp = np.linspace(0, (nPoints-1)*1/dSampleRate,nPoints)
vec_freq_AWG_frame = np.linspace(0.0, 1.0/(2.0/dSampleRate), nPoints*0.5)
interp_T_noise = np.interp(vec_time_interp, vec_time, vec_T_noise)
vec_time = np.copy(vec_time_interp)
vec_T_noise = np.copy(interp_T_noise)
if (bln_hist_FFT):
vec_histogram, vec_binedges = np.histogram(vec_T_noise, bins = int(d_bincounts))
vec_F_noise_FFT = 2.0 / len(vec_time) * np.abs(fft(vec_T_noise).flatten())
else:
vec_F_noise_FFT = np.zeros_like(vec_freq_AWG_frame)
return ({
"vec_time": vec_time,
"vec_freq": vec_freq,
"vec_freq_AWG_frame": vec_freq_AWG_frame,
"vec_T_noise": vec_T_noise,
"vec_F_noise_FFT": vec_F_noise_FFT[:len(vec_time)//2],
"vec_F_noise":vec_F_noise,
"vec_histogram": vec_histogram,
"vec_binedges": vec_binedges[0:int(d_bincounts)],
})
def calculateNoise(self):
"""Generate noise waveform"""
# get config values
nPoints = int(self.getValue('Number of points'))
sampleRate = self.getValue('Sample rate')
# start with allocating time and amplitude vectors
self.vTime = np.arange(nPoints, dtype=float)/sampleRate
# get noise waveform
result= self.generateNoise()
if (result is None):
return
self.vTime = result["vec_time"]
self.vFreq = result["vec_freq"]
self.vFreq_AWG_frame = result["vec_freq_AWG_frame"]
self.vBinEdges = result["vec_binedges"]
self.vNoise_Time = result["vec_T_noise"]
self.vNoise_Freq = result["vec_F_noise"]
self.vNoise_Freq_FFT = result["vec_F_noise_FFT"]
self.vHistogram = result["vec_histogram"]
def calculateWaveform(self):
"""Generate waveforms, including pre-pulses, readout and gates"""
# get config values
nPoints = int(self.getValue('Number of points'))
sampleRate = self.getValue('Sample rate')
firstDelay = self.getValue('First pulse delay')
# start with allocating time and amplitude vectors
self.vTime = np.arange(nPoints, dtype=float)/sampleRate
# create list of output vectors
self.vEnvelope = np.zeros_like(self.vTime)
# go on depending on with sequence
self.generateSequence(startTime=firstDelay)
def generateSequence(self, startTime):
# get config values
sSequence = self.getValue('Sequence')
nPulses = int(self.getValue('# of pulses'))
seqPeriod = self.getValue('Pulse period')
# go on depending on waveform
if sSequence == 'CP/CPMG':
# get length of actual pulses
dPulseT1 = self.getPulseDuration(1)
dPulseT2 = self.getPulseDuration(2)
dPulseTot = 2*dPulseT1 + dPulseT2*nPulses
# add the first pi/2 pulses
self.addPulse(1, startTime + dPulseT1/2)
# add more pulses
if nPulses <= 0:
# no pulses = ramsey
vTimePi = []
# second pi/2 pulse
self.addPulse(1, startTime + seqPeriod + dPulseTot - dPulseT1/2)
elif nPulses == 1:
# one pulse, echo experiment
vTimePi = [startTime + dPulseT1 + seqPeriod/2 + dPulseT2/2]
# second pi/2 pulse
self.addPulse(1, startTime + seqPeriod + dPulseTot - dPulseT1/2)
elif nPulses > 1:
# figure out timing of pi pulses
vTimePi = startTime + dPulseT1 + seqPeriod/2 + dPulseT2/2 + \
(seqPeriod + dPulseT2)*np.arange(nPulses)
# second pi/2 pulse
self.addPulse(1, startTime + nPulses*seqPeriod + dPulseTot - dPulseT1/2)
# add pi pulses, one by one
for dTimePi in vTimePi:
self.addPulse(2, dTimePi)
elif sSequence == 'Generic sequence':
# generic pulse sequence, add the pulses specified in the pulse list
t = startTime
for n in range(nPulses):
pulseType = 1 + (n % 8)
# get length of current pulse
dPulseT = self.getPulseDuration(pulseType)
self.addPulse(pulseType, t + dPulseT/2)
# add spacing as defined for this pulse
t += dPulseT + self.getValue('Spacing #%d' % (pulseType))
def addPulse(self, nType, dTime, nOutput=None, bTimeStart=False, phase=None):
"""Add pulse to waveform"""
vTime, vPulse, vIndx = self.getPulseEnvelope(nType, dTime, bTimeStart)
if len(vTime) == 0:
return
self.vEnvelope[vIndx] += vPulse
def getPulseEnvelope(self, nType, dTime, bTimeStart=False):
"""Get pulse envelope for a given pulse"""
sPulseType = self.getValue('Pulse type')
dSampleRate = self.getValue('Sample rate')
truncRange = self.getValue('Truncation range')
start_at_zero = self.getValue('Start at zero')
# get pulse params
dAmp = self.getValue('Amplitude #%d' % nType)
dWidth = self.getValue('Width #%d' % nType)
dPlateau = self.getValue('Plateau #%d' % nType)
# get pulse width
if sPulseType == 'Square':
dTotTime = dWidth+dPlateau
elif sPulseType == 'Ramp':
dTotTime = 2*dWidth+dPlateau
elif sPulseType == 'Gaussian':
dTotTime = truncRange*dWidth + dPlateau
# shift time to mid point if user gave start point
if bTimeStart:
dTime = dTime + dTotTime/2
# get the range of indices in use
vIndx = np.arange(max(np.round((dTime-dTotTime/2)*dSampleRate), 0),
min(np.round((dTime+dTotTime/2)*dSampleRate), len(self.vTime)))
vIndx = np.int0(vIndx)
self.log(len(vIndx), len(self.vTime))
# calculate time values for the pulse indices
vTime = vIndx/dSampleRate
# calculate the actual value for the selected indices
if sPulseType == 'Square':
vPulse = (vTime >= (dTime-(dWidth+dPlateau)/2)) & \
(vTime < (dTime+(dWidth+dPlateau)/2))
elif sPulseType == 'Ramp':
# rising and falling slopes
vRise = (vTime-(dTime-dPlateau/2-dWidth))/dWidth
vRise[vRise<0.0] = 0.0
vRise[vRise>1.0] = 1.0
vFall = ((dTime+dPlateau/2+dWidth)-vTime)/dWidth
vFall[vFall<0.0] = 0.0
vFall[vFall>1.0] = 1.0
vPulse = vRise * vFall
# vPulse = np.min(1, np.max(0, (vTime-(dTime-dPlateau/2-dWidth))/dWidth)) * \
# np.min(1, np.max(0, ((dTime+dPlateau/2+dWidth)-vTime)/dWidth))
elif sPulseType == 'Gaussian':
# width is two times std
#dStd = dWidth/2;
# alternate def; std is set to give total pulse area same as a square
dStd = dWidth/np.sqrt(2*np.pi)
# cut the tail part and increase the amplitude, if necessary
dOffset = 0
if dPlateau > 0:
# add plateau
vPulse = (vTime >= (dTime-dPlateau/2)) & \
(vTime < (dTime+dPlateau/2))
if dStd > 0:
# before plateau
vPulse = vPulse + (vTime < (dTime-dPlateau/2)) * \
(np.exp(-(vTime-(dTime-dPlateau/2))**2/(2*dStd**2))-dOffset)/(1-dOffset)
# after plateau
vPulse = vPulse + (vTime >= (dTime+dPlateau/2)) * \
(np.exp(-(vTime-(dTime+dPlateau/2))**2/(2*dStd**2))-dOffset)/(1-dOffset)
else:
if dStd > 0:
vPulse = (np.exp(-(vTime-dTime)**2/(2*dStd**2))-dOffset)/(1-dOffset)
else:
vPulse = np.zeros_like(vTime)
# # add the pulse to the previous ones
# vY[iPulse] = vY[iPulse] + dAmp * vPulse
vPulse = dAmp * vPulse
if start_at_zero:
vPulse = vPulse - vPulse.min()
vPulse = vPulse/vPulse.max()*dAmp
# return both time, envelope, and indices
return (vTime, vPulse, vIndx)
def getPulseDuration(self, nType):
"""Get total pulse duration waveform, for timimg purposes"""
# check if edge-to-edge
if self.getValue('Edge-to-edge pulses'):
width = self.getValue('Width #%d' % nType)
plateau = self.getValue('Plateau #%d' % nType)
pulseEdgeWidth = self.getValue('Edge position')
return pulseEdgeWidth * width + plateau
else:
return 0.0
| UTF-8 | Python | false | false | 19,576 | py | 5 | Noise_Generator.py | 4 | 0.566152 | 0.554914 | 0 | 454 | 42.118943 | 150 |
aitoralmeida/intellidata | 14,697,378,121,820 | b4fffaacc784f07ba742820aa58e3b97738836bd | 3f16f572ab972792be2cff2789fb6a0629088564 | /mongo_import.py | ed2b5f0d8118f7cd0dc4261243452fad02a13ff6 | [
"Apache-2.0"
]
| permissive | https://github.com/aitoralmeida/intellidata | 13512827fb73cbb762345b6e5331c6d0a1b863b4 | 7a0ebdb9f9097aae4410e589646312ad3e24e66a | refs/heads/master | 2021-01-13T01:54:14.315721 | 2014-02-10T11:15:10 | 2014-02-10T11:15:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import json
import glob
from bbvalib import create_mongoclient
DELETE_ALL = True
TOP_CLIENTS = True
SHOP_ZIPCODE_SUMMARY = True
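# Switches controlling what this script (re)builds. DELETE_ALL re-imports the
# flat collections from the scraped JSON files (see the block further down);
# TOP_CLIENTS and SHOP_ZIPCODE_SUMMARY are assumed to gate the map-reduce
# aggregation steps defined later in the script.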
#############################################################################
#
#
# Import flat data (regular tables)
#
#
def process_origin_flat():
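    """Import the scraped origin-*.json files into top_clients_week/_month.

    A sketch of the expected input, inferred from the glob and the basename
    split below (the concrete example name is an assumption):

        data/scraped/origin-week-es_auto-28004.json
        -> by='week', category='es_auto', shop_zipcode='28004'

    One document per (period, home zipcode) pair is inserted into the
    collection that matches 'by'.
    """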
db = create_mongoclient()
db.drop_collection('top_clients_week')
db.drop_collection('top_clients_month')
origin_files = glob.glob("data/scraped/origin-*.json")
for f in origin_files:
_, by, category, shop_zipcode = os.path.basename(f)[:-5].split('-')
contents = json.load(open(f))
stats = contents['data']['stats']
for period in stats:
cur_date = period['date']
for home_zipcode_data in period['zipcodes']:
cur_data = dict(
category = category,
shop_zipcode = shop_zipcode,
home_zipcode = home_zipcode_data['label'],
incomes = home_zipcode_data['incomes'],
num_cards = home_zipcode_data['num_cards'],
num_payments = home_zipcode_data['num_payments'],
)
cur_data[by] = cur_date
if by == 'week':
db.top_clients_week.insert(cur_data)
elif by == 'month':
db.top_clients_month.insert(cur_data)
else:
print "Error: %s is not (month, key)" % by
def process_cube_flat():
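    """Import the scraped cube-*.json files into cube_week / cube_month.

    Each cube entry carries a 'hash' of the form '<gender_code>#<age_code>'
    (e.g. 'F#3'), which is expanded below into a gender label and an age
    range via the genders/ages lookup tables.
    """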
db = create_mongoclient()
db.drop_collection('cube_week')
db.drop_collection('cube_month')
genders = {
'U' : 'unknown',
'F' : 'female',
'M' : 'male',
'E' : 'enterprise'
}
ages = {
'0' : {
'min_age' : 0,
'max_age' : 18,
},
'1' : {
'min_age' : 19,
'max_age' : 25
},
'2' : {
'min_age' : 26,
'max_age' : 35
},
'3' : {
'min_age' : 36,
'max_age' : 45
},
'4' : {
'min_age' : 46,
'max_age' : 55,
},
'5' : {
'min_age' : 56,
'max_age' : 65
},
'6' : {
'min_age' : 66,
'max_age' : 100
},
'U' : {
'min_age' : 0,
'max_age' : 100
}
}
origin_files = glob.glob("data/scraped/cube-*.json")
for f in origin_files:
# cube-week-es_auto-28004.json
_, by, category, shop_zipcode = os.path.basename(f)[:-5].split('-')
contents = json.load(open(f))
stats = contents['data']['stats']
for period in stats:
cur_date = period['date']
for cube in period['cube']:
gender_code, age_code = cube['hash'].split('#')
cur_data = dict(
category = category,
shop_zipcode = shop_zipcode,
hash = cube['hash'],
gender = genders[gender_code],
age_code = age_code,
min_age = ages[age_code]['min_age'],
max_age = ages[age_code]['max_age'],
avg = cube['avg'],
num_cards = cube['num_cards'],
num_payments = cube['num_payments'],
)
cur_data[by] = cur_date
if by == 'week':
db.cube_week.insert(cur_data)
elif by == 'month':
db.cube_month.insert(cur_data)
else:
print "Error: %s is not in (week, month)" % by
def process_patterns_flat():
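    """Import the scraped patterns-*.json files into patterns_month.

    Patterns are only available per month; each inserted document describes
    one weekday of one month (mode, avg/std, min/max, num_cards, num_payments
    and the per-hour breakdown kept verbatim in 'hours').
    """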
db = create_mongoclient()
db.drop_collection('patterns_month')
origin_files = glob.glob("data/scraped/patterns-*.json")
for f in origin_files:
# patterns-month-es_barsandrestaurants-28004.json
_, by, category, shop_zipcode = os.path.basename(f)[:-5].split('-')
if by != 'month':
print "Error: %s is not in (month)" % by
continue
contents = json.load(open(f))
stats = contents['data']['stats']
for period in stats:
for day_data in period['days']:
cur_data = dict(
month = period['date'],
category = category,
shop_zipcode = shop_zipcode,
key = '%s#%s' % (day_data['day'].lower(), period['date']),
day = day_data['day'].lower(),
mode = day_data['mode'],
num_payments = day_data['num_payments'],
avg = day_data['avg'],
std = day_data['std'],
min = day_data['min'],
max = day_data['max'],
num_cards = day_data['num_cards'],
hours = day_data['hours'],
)
db.patterns_month.insert(cur_data)
if DELETE_ALL:
print "Recreating origin flat"
process_origin_flat()
print "Recreating cube flat"
process_cube_flat()
print "Recreating patterns flat"
process_patterns_flat()
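    # Optional sanity check after a re-import (a sketch, not part of the
    # original flow; it only touches collections created above):
    #   db = create_mongoclient()
    #   print db.top_clients_week.count(), db.cube_month.count(), db.patterns_month.count()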
#############################################################################
#
#
# Aggregate data (map reduce)
#
#
def top_clients_summary():
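    """Aggregate top_clients_week/_month into top_clients_summary via map-reduce.

    For every shop zipcode (the map key) the result nests, per home zipcode,
    'per_week' and 'per_month' buckets, each holding a 'total' entry plus one
    entry per date, broken down by category and summed over incomes,
    num_cards and num_payments.
    """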
from bson.code import Code
from bson.son import SON
db = create_mongoclient()
db.drop_collection('top_clients_summary')
categories = db.top_clients_week.find().distinct('category')
categories = json.dumps(categories)
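    # The category list is JSON-encoded so it can be spliced verbatim into the
    # JavaScript map/reduce bodies below through the %(CATEGORIES)s placeholder.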
map_func = Code("""function () {
// Create the summaries (one with the data, the other empty)
var summary = {
'incomes' : this.incomes,
'num_cards' : this.num_cards,
'num_payments' : this.num_payments
};
var empty_summary = {
'incomes' : 0,
'num_cards' : 0,
'num_payments' : 0
};
// self will be useful in inner functions
var self = this;
// Select by (per_week, per_month) and the current date (the week or the month)
var by, date, empty_by;
if (this.week === undefined) {
if (this.month === undefined ) {
throw new Error("this.month or this.week must exist");
} else {
// Month exists; week doesn't
by = 'per_month';
empty_by = 'per_week';
date = this.month;
}
} else {
// Week exists
by = 'per_week';
empty_by = 'per_month';
date = this.week;
}
// Start adding data
// Create the basic infrastructure, and add the register with this zipcode
var value = {
'home_zipcodes' : {}
};
value['home_zipcodes'][this.home_zipcode] = {
'per_week' : {
'total' : {
// Later on, it will have this:
// total: (incomes, num_cards, num_payments)
// category1 : (incomes, num_cards, num_payments)
// category2 : (incomes, num_cards, num_payments)
}
},
'per_month' : {
'total' : {
// Later on, it will have this:
// total: (incomes, num_cards, num_payments)
// category1 : (incomes, num_cards, num_payments)
// category2 : (incomes, num_cards, num_payments)
}
}
};
    // Right now we haven't added any data yet. We start now.
// Fill all the categories (except for "total") of total with zeros
%(CATEGORIES)s.forEach(function(category) {
value['home_zipcodes'][self.home_zipcode]['per_week']['total'][category] = empty_summary;
value['home_zipcodes'][self.home_zipcode]['per_month']['total'][category] = empty_summary;
});
// Add the total value
value['home_zipcodes'][self.home_zipcode][by]['total']['total'] = summary;
value['home_zipcodes'][self.home_zipcode][empty_by]['total']['total'] = empty_summary;
// Add the category value (it existed, but was initialized to empty)
value['home_zipcodes'][self.home_zipcode][by]['total'][this.category] = summary;
//
// Now, total has been filled. We start with each week (or month)
//
value['home_zipcodes'][self.home_zipcode][by][date] = {
'total' : summary
// It will also be filled with:
// category1: (incomes, num_cards, num_payments)
// category2: (incomes, num_cards, num_payments)
};
// First, we clear all the categories (except for total):
%(CATEGORIES)s.forEach(function(category) {
value['home_zipcodes'][self.home_zipcode][by][date][category] = empty_summary;
});
// Then, we add the category data
value['home_zipcodes'][this.home_zipcode][by][date][this.category] = summary;
emit(this.shop_zipcode, value);
}""" % dict(
CATEGORIES = categories,
))
reduce_func = Code("""function (key, values) {
var deepcopy = function (obj) {
return JSON.parse(JSON.stringify(obj));
}
var return_value = {
'home_zipcodes' : {}
};
var return_home_zipcodes = return_value['home_zipcodes'];
values.forEach(function(value) {
/* Each value has the following structure:
{
'home_zipcodes' : {
'28004' : {
'per_week' : {
'total' : {
'category1' : {
income, num_cards, num_payments
},
'category2' : {
income, num_cards, num_payments
},
'total' : {
income, num_cards, num_payments
}
},
'201301' : {
'category1' : {
income, num_cards, num_payments
},
'category2' : {
income, num_cards, num_payments
},
'total' : {
income, num_cards, num_payments
}
}
}
'per_month' : (same as in per_week)
}
}
}
*/
// For each zipcode in the value
for (var home_zipcode in value['home_zipcodes']) {
// If that zipcode was previously unknown, just copy the data.
if (return_home_zipcodes[home_zipcode] == undefined) {
return_home_zipcodes[home_zipcode] = deepcopy(value['home_zipcodes'][home_zipcode]);
} else {
        // If it existed, then merge it. "me" is the current zipcode data, "other" is the
        // existing zipcode data.
var me = value['home_zipcodes'][home_zipcode];
var other = return_home_zipcodes[home_zipcode];
// We're gonna sum everything. 'total' is just yet another category for us
var categories = %(CATEGORIES)s;
categories.push('total');
['per_week', 'per_month'].forEach(function(by) {
// First, the total numbers
// Both in per_week and per_month, sum the existing total data for
// every category (including total).
categories.forEach(function(category) {
other[by]['total'][category]['incomes'] += me[by]['total'][category]['incomes'];
other[by]['total'][category]['num_cards'] += me[by]['total'][category]['num_cards'];
other[by]['total'][category]['num_payments'] += me[by]['total'][category]['num_payments'];
});
// Second, the week / month numbers
// For each date in the current data (the existing data does not need any change)
for (var date in me[by]) {
if (date == 'total')
continue;
// If this new data is not present in the existing data, just copy it.
if (other[by][date] == undefined) {
// Those only in me (and not in other)
other[by][date] = deepcopy(me[by][date]);
} else {
// If it exists in both, merge it. cur_other and cur_me are the particular data.
// e.g. other = 'home_zipcodes' / '28004';
// cur_other = 'home_zipcodes' / '28004' / 'per_week' / '201301'
var cur_other = other[by][date];
var cur_me = me[by][date];
// For each category (including total), sum the data
categories.forEach(function(category) {
cur_other[category]['incomes'] += cur_me[category]['incomes'];
cur_other[category]['num_cards'] += cur_me[category]['num_cards'];
cur_other[category]['num_payments'] += cur_me[category]['num_payments'];
});
}
}
});
}
}
});
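  // Debug trace for one hard-coded zipcode/week; it only prints and does not alter the reduced value.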
if ( key == "28760" ) {
var cur_zipcode = return_value['home_zipcodes']['28760'];
if (cur_zipcode != undefined ) {
var cur_data = cur_zipcode['per_week']['201301'];
if (cur_data != undefined ) {
if (cur_zipcode['per_week']['total']['es_home']['incomes'] != 0) {
print("---------------------------------------------------------------------");
printjson(cur_zipcode['per_week']['total']['es_home']);
print("");
print("=>");
print("");
printjson(cur_data['es_home']);
print("");
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
print("++++++++++++++++++V A L U E S++++++++++++++++++++++++++++++++++++++++");
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
printjson(values);
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
print("++++++++++++++++++R E T U R N I N G++++++++++++++++++++++++++++++++++");
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
printjson(return_value);
}
}
}
}
return return_value;
}""" % dict( CATEGORIES = categories ))
# print map_func
# print reduce_func
nonAtomic = False
print "Procesando 1"
db.top_clients_week.map_reduce(
map_func,
reduce_func,
out=SON([("reduce", "top_clients_summary"), ('nonAtomic', nonAtomic)]))
print "Procesando 2"
db.top_clients_month.map_reduce(
map_func,
reduce_func,
out=SON([("reduce", "top_clients_summary"), ('nonAtomic', nonAtomic)]))
print "Hecho"
if TOP_CLIENTS:
print "Creating top clients summary"
top_clients_summary()
def shop_zipcode_summary():
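  # Map-reduce patterns_month, cube_month and cube_week into shop_zipcode_summary:
  # one document per shop zipcode combining per-day/per-hour payment patterns with
  # gender/age cube breakdowns, per category and in total.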
from bson.code import Code
from bson.son import SON
db = create_mongoclient()
db.drop_collection('shop_zipcode_summary')
categories = db.cube_month.find().distinct('category')
categories = json.dumps(categories)
shared_code = """
var MIN = 100000000000;
var deepcopy = function (obj) {
return JSON.parse(JSON.stringify(obj));
}
    // Auxiliary data
var self = this;
var empty_summary = function() {
return {
'avg' : 0.0,
'num_payments' : 0,
'min' : MIN,
'max' : 0,
'total' : 0.0,
'hours' : {
// 08 : ...
}
};
};
var empty_cube_data = function() {
return {
'avg' : 0,
'num_cards' : 0,
'num_payments' : 0,
'total' : 0
};
};
var empty_total_cube_data = function() {
return {
'avg' : 0,
'num_payments' : 0,
'total' : 0
};
};
var empty_cube = function() {
return {
'total' : {
'per_age' : {
'0' : empty_total_cube_data(),
'1' : empty_total_cube_data(),
'2' : empty_total_cube_data(),
'3' : empty_total_cube_data(),
'4' : empty_total_cube_data(),
'5' : empty_total_cube_data(),
'6' : empty_total_cube_data(),
'U' : empty_total_cube_data()
},
'per_gender' : {
'male' : empty_total_cube_data(),
'female' : empty_total_cube_data(),
'enterprise' : empty_total_cube_data(),
'unknown' : empty_total_cube_data()
}
},
'cubes' : {
'male' : {
'0' : empty_cube_data(),
'1' : empty_cube_data(),
'2' : empty_cube_data(),
'3' : empty_cube_data(),
'4' : empty_cube_data(),
'5' : empty_cube_data(),
'6' : empty_cube_data(),
'U' : empty_cube_data()
},
'female' : {
'0' : empty_cube_data(),
'1' : empty_cube_data(),
'2' : empty_cube_data(),
'3' : empty_cube_data(),
'4' : empty_cube_data(),
'5' : empty_cube_data(),
'6' : empty_cube_data(),
'U' : empty_cube_data()
},
'unknown' : {
'0' : empty_cube_data(),
'1' : empty_cube_data(),
'2' : empty_cube_data(),
'3' : empty_cube_data(),
'4' : empty_cube_data(),
'5' : empty_cube_data(),
'6' : empty_cube_data(),
'U' : empty_cube_data()
},
'enterprise' : {
'U' : empty_cube_data()
}
}
};
};
//
// We have an empty value
//
var value = {};
//
// We fill the fields (empty categories)
//
value['categories'] = {};
%(CATEGORIES)s.forEach(function(category){
value['categories'][category] = {
'months' : {},
'weeks' : {},
'total' : {
'days' : {},
'total' : empty_summary()
}
}
});
//
// We fill the fields (total)
value['total'] = {
'days' : {},
'weeks' : {},
'total' : empty_summary()
};
""" % dict(CATEGORIES = categories)
map_patterns_func = Code("""function () {
%(SHARED)s
// Precalculation of hours
var hours_data = {};
this.hours.forEach(function(hour_data) {
hours_data[hour_data['hour']] = {
'std' : hour_data['std'],
'min' : hour_data['min'],
'max' : hour_data['max'],
'num_cards' : hour_data['num_cards'],
'mode' : hour_data['mode'],
'num_payments' : hour_data['num_payments'],
'avg' : hour_data['avg'],
'total' : hour_data['num_payments'] * hour_data['avg']
}
});
// And now, we fill the real data.
value['categories'][self.category]['months'][self.month] = {
'days' : {},
'cubes' : empty_cube(),
'total' : empty_summary()
};
value['categories'][self.category]['months'][self.month]['days'][self.day] = {
'avg' : self.avg,
'std' : self.std,
'num_payments' : self.num_payments,
'min' : self.min,
'max' : self.max,
'num_cards' : self.num_cards,
'mode' : self.mode,
'hours' : hours_data,
'total' : self.avg * self.num_payments
};
value['categories'][self.category]['months'][self.month]['total'] = {
'avg' : self.avg,
'num_payments' : self.num_payments,
'min' : self.min,
'max' : self.max,
'total' : self.avg * self.num_payments,
'hours' : {}
};
for (var hour in hours_data) {
var cur_hour_data = hours_data[hour];
value['categories'][self.category]['months'][self.month]['total']['hours'][hour] = {
'min' : cur_hour_data['min'],
'max' : cur_hour_data['max'],
'num_payments' : cur_hour_data['num_payments'],
'avg' : cur_hour_data['avg'],
'total' : cur_hour_data['num_payments'] * cur_hour_data['avg']
};
}
value['categories'][self.category]['total']['days'][self.day] = {
'avg' : self.avg,
'num_payments' : self.num_payments,
'min' : self.min,
'max' : self.max,
'hours' : hours_data,
'total' : self.avg * self.num_payments
};
var summary_total = function() {
return {
'avg' : self.avg,
'num_payments' : self.num_payments,
'min' : self.min,
'max' : self.max,
'hours' : hours_data,
'total' : self.avg * self.num_payments
};
};
value['categories'][self.category]['total']['total'] = summary_total();
value['total']['days'][self.day] = summary_total();
value['total']['total'] = summary_total();
emit(this.shop_zipcode, value);
}""" % dict(
CATEGORIES = categories,
SHARED = shared_code
))
map_cube_month_func = Code("""function () {
%(SHARED)s
// Basic structure
value['categories'][self.category]['months'][self.month] = {
'days' : {},
'cubes' : empty_cube(),
'total' : empty_summary()
};
// Monthly, category data
// Individual data
value['categories'][self.category]['months'][self.month]['cubes']['cubes'][self.gender][self.age_code]['avg'] = self.avg;
value['categories'][self.category]['months'][self.month]['cubes']['cubes'][self.gender][self.age_code]['num_cards'] = self.num_cards;
value['categories'][self.category]['months'][self.month]['cubes']['cubes'][self.gender][self.age_code]['num_payments'] = self.num_payments;
value['categories'][self.category]['months'][self.month]['cubes']['cubes'][self.gender][self.age_code]['total'] = self.num_payments * self.avg;
// total per_age
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_age'][self.age_code]['avg'] = self.avg;
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_age'][self.age_code]['num_payments'] = self.num_payments;
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_age'][self.age_code]['total'] = self.num_payments * self.avg;
// total per_gender
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_gender'][self.gender]['avg'] = self.avg;
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_gender'][self.gender]['num_payments'] = self.num_payments;
value['categories'][self.category]['months'][self.month]['cubes']['total']['per_gender'][self.gender]['total'] = self.num_payments * self.avg;
// TODO: there should be category-level totals and category-independent monthly totals and non monthly totals
// Category level total data. Individual
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['avg'] = self.avg;
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['num_payments'] = self.num_payments;
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['total'] = self.avg * self.num_payments;
emit(this.shop_zipcode, value);
}""" % dict(
CATEGORIES = categories,
SHARED = shared_code
))
map_cube_week_func = Code("""function () {
%(SHARED)s
// Basic structure
value['categories'][self.category]['weeks'][self.week] = {
'cubes' : empty_cube(),
};
// Monthly, category data
// Individual data
value['categories'][self.category]['weeks'][self.week]['cubes']['cubes'][self.gender][self.age_code]['avg'] = self.avg;
value['categories'][self.category]['weeks'][self.week]['cubes']['cubes'][self.gender][self.age_code]['num_cards'] = self.num_cards;
value['categories'][self.category]['weeks'][self.week]['cubes']['cubes'][self.gender][self.age_code]['num_payments'] = self.num_payments;
value['categories'][self.category]['weeks'][self.week]['cubes']['cubes'][self.gender][self.age_code]['total'] = self.num_payments * self.avg;
// total per_age
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_age'][self.age_code]['avg'] = self.avg;
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_age'][self.age_code]['num_payments'] = self.num_payments;
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_age'][self.age_code]['total'] = self.num_payments * self.avg;
// total per_gender
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_gender'][self.gender]['avg'] = self.avg;
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_gender'][self.gender]['num_payments'] = self.num_payments;
value['categories'][self.category]['weeks'][self.week]['cubes']['total']['per_gender'][self.gender]['total'] = self.num_payments * self.avg;
//
// total outside category
//
value['total']['weeks'][self.week] = empty_cube();
//
// total / total / per_age
value['total']['weeks'][self.week]['total']['per_age'][self.age_code]['avg'] = self.avg;
value['total']['weeks'][self.week]['total']['per_age'][self.age_code]['num_payments'] = self.num_payments;
value['total']['weeks'][self.week]['total']['per_age'][self.age_code]['total'] = self.avg * self.num_payments;
// total / total / per_gender
value['total']['weeks'][self.week]['total']['per_gender'][self.gender]['avg'] = self.avg;
value['total']['weeks'][self.week]['total']['per_gender'][self.gender]['num_payments'] = self.num_payments;
value['total']['weeks'][self.week]['total']['per_gender'][self.gender]['total'] = self.avg * self.num_payments;
// total / cubes / gender / age_code
delete value['total']['weeks'][self.week]['cubes'][self.gender][self.age_code]['num_cards'];
value['total']['weeks'][self.week]['cubes'][self.gender][self.age_code]['avg'] = self.avg;
value['total']['weeks'][self.week]['cubes'][self.gender][self.age_code]['num_payments'] = self.num_payments;
value['total']['weeks'][self.week]['cubes'][self.gender][self.age_code]['total'] = self.num_payments * self.avg;
// summary
value['total']['weeks'][self.week]['summary'] = {
'avg' : self.avg,
'num_payments' : self.num_payments,
'total' : self.num_payments * self.avg
};
// TODO: there should be category-level totals and category-independent weekly totals and non weekly totals
// Category level total data. Individual
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['avg'] = self.avg;
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['num_payments'] = self.num_payments;
// value['categories'][self.category]['total']['cubes']['cubes'][self.gender][self.age_code]['total'] = self.avg * self.num_payments;
emit(this.shop_zipcode, value);
}""" % dict(
CATEGORIES = categories,
SHARED = shared_code
))
reduce_func = Code("""function (key, values) {
%(SHARED)s
// Take the initial value, and fill it with the easy data
var reduced_value = value;
var mergeBasicTotal = function (other_total, me_total) {
// First, the basic data
var total = other_total['total'] + me_total['total'];
var num_payments = other_total['num_payments'] + me_total['num_payments'];
var avg;
if (num_payments == 0)
avg = 0.0;
else
avg = 1.0 * total / num_payments;
other_total['total'] = total;
other_total['avg'] = avg;
other_total['num_payments'] = num_payments;
};
var mergeTotalTotal = function (other_total, me_total) {
if (other_total['hours'] == undefined ) {
print("Expected total structure as first argument. Got:");
printjson(other_total);
throw new Error("Expected total structure as first argument. See the logs.");
} else if (me_total['hours'] == undefined ) {
print("Expected total structure as second argument. Got:");
printjson(me_total);
throw new Error("Expected total structure as second argument. See the logs.");
}
mergeBasicTotal(other_total, me_total);
if (me_total['min'] < other_total['min'])
other_total['min'] = me_total['min'];
if (me_total['max'] > other_total['max'])
other_total['max'] = me_total['max'];
// And now the hours:
for (var hour in me_total['hours'] ){
        // If the hours do not exist, copy them
if (other_total['hours'][hour] == undefined) {
other_total['hours'][hour] = deepcopy(me_total['hours'][hour]);
} else {
// Otherwise, merge
var other_hour = other_total['hours'][hour];
var me_hour = me_total['hours'][hour];
var total = other_hour['total'] + me_hour['total'];
var num_payments = other_hour['num_payments'] + me_hour['num_payments'];
var avg;
if (num_payments == 0)
avg = 0.0;
else
avg = 1.0 * total / num_payments;
other_hour['total'] = total;
other_hour['avg'] = avg;
other_hour['num_payments'] = num_payments;
if (me_hour['min'] < other_hour['min'])
other_hour['min'] = me_hour['min'];
if (me_hour['max'] > other_hour['max'])
other_hour['max'] = me_hour['max'];
}
}
};
var mergeTotalDays = function(other_days, me_days) {
for (var day in me_days) {
// If the day does not exist, copy it
if (other_days[day] == undefined) {
other_days[day] = deepcopy(me_days[day]);
} else {
// Otherwise, merge each day
mergeTotalTotal(other_days[day], me_days[day]);
}
}
};
var mergeCubeTotals = function(other_cubes_total, me_cubes_total){
// Then, cubes/total. First cubes/total/per_age
var other_per_age = other_cubes_total['per_age'];
var me_per_age = me_cubes_total['per_age'];
for (var age in me_per_age) {
// If the existing is empty or does not exist, copy it
if (other_per_age[age] == undefined) {
other_per_age[age] = deepcopy(me_per_age[age]);
} else {
// Otherwise, merge
var total = other_per_age[age]['total'] + me_per_age[age]['total'];
var num_payments = other_per_age[age]['num_payments'] + me_per_age[age]['num_payments'];
var avg;
if (num_payments == 0)
avg = 0.0;
else
avg = 1.0 * total / num_payments;
other_per_age[age]['total'] = total;
other_per_age[age]['num_payments'] = num_payments;
other_per_age[age]['avg'] = avg;
}
}
// Then cubes/total/per_gender
var other_per_gender = other_cubes_total['per_gender'];
var me_per_gender = me_cubes_total['per_gender'];
for (var gender in me_per_gender) {
// If the existing is empty or does not exist, copy it
if (other_per_gender[gender] == undefined) {
other_per_gender[gender] = deepcopy(me_per_gender[gender]);
} else {
// Otherwise, merge
var total = other_per_gender[gender]['total'] + me_per_gender[gender]['total'];
var num_payments = other_per_gender[gender]['num_payments'] + me_per_gender[gender]['num_payments'];
var avg;
if (num_payments == 0)
avg = 0.0;
else
avg = 1.0 * total / num_payments;
other_per_gender[gender]['total'] = total;
other_per_gender[gender]['num_payments'] = num_payments;
other_per_gender[gender]['avg'] = avg;
}
}
}
var processCubes = function(other, me, category, month, week) {
// First cubes/cubes
for (var gender in me['cubes']['cubes']) {
// If the gender doesn't exist, copy it
if (other['cubes']['cubes'][gender] == undefined) {
other['cubes']['cubes'][gender] = deepcopy(me['cubes']['cubes'][gender]);
continue;
} else {
// Merge a gender, age (except for total)
for (var age in me['cubes']['cubes'][gender]) {
// If the age doesn't exist or it's empty, copy it
if (other['cubes']['cubes'][gender][age] == undefined || other['cubes']['cubes'][gender][age]['num_cards'] < 1) {
other['cubes']['cubes'][gender][age] = deepcopy(me['cubes']['cubes'][gender][age]);
continue;
} else {
// If the existing data has something and the
// current data is empty, just skip this one.
if (me['cubes']['cubes'][gender][age]['num_cards'] < 1) {
continue;
}
                // It's impossible for this particular entry to be replicated,
                // since for a zipcode::category::month::cubes::gender::age there is a
                // single record.
print("Replicated data found. shop_zipcode " + key + ", category " + category + ", month " + month + " or week " + week + ", cube (gender = " + gender + ", age = " + age + ") already had data. See the logs for comparison");
printjson(other['cubes']['cubes'][gender][age]);
printjson(me['cubes']['cubes'][gender][age]);
throw new Error("Replicated data found. shop_zipcode " + key + ", category " + category + ", month " + month + " or week " + week + ", cube (gender = " + gender + ", age = " + age + ") already had data. See the logs for comparison");
}
}
}
}
mergeCubeTotals(other['cubes']['total'], me['cubes']['total']);
};
var mergeCubes = function(other, me) {
for (var gender in me) {
for (var age_code in me[gender]) {
var other_data = other[gender][age_code];
var me_data = me[gender][age_code];
if (me_data['num_payments'] >= 1) {
other_data['num_payments'] += me_data['num_payments'];
other_data['total'] += me_data['total'];
            other_data['avg'] = other_data['total'] / other_data['num_payments'];
}
}
}
}
// Take all values, and merge them one by one in reduced_value
values.forEach(function(value) {
// First, merge categories
%(CATEGORIES)s.forEach(function(category) {
// Take the data to be merged
var me_category = value['categories'][category];
var other_category = reduced_value['categories'][category];
// First, merge months
for (var month in me_category['months']) {
// If the month doesn't exist in reduced_value, copy it
if (other_category['months'][month] == undefined) {
other_category['months'][month] = deepcopy(me_category['months'][month]);
continue;
}
// Otherwise, merge.
var me_month = me_category['months'][month];
var other_month = other_category['months'][month];
// First, days
for (var day in me_month['days']) {
// If day does not exist, or it exists but it's empty, copy it
if (other_month['days'][day] == undefined || other_month['days'][day]['num_cards'] < 1) {
other_month['days'][day] = deepcopy(me_month['days'][day]);
continue;
} else {
// If the other data is not empty but this data is
// empty, skip it.
if (me_month['days'][day]['num_cards'] < 1) {
continue;
}
// It's impossible that the days are replicated, since
// for a zipcode::category::month::day, there is a single set
// of data. If this happens, we're summing information
// twice, which is an error.
print("Replicated data found. shop_zipcode " + key + ", category " + category + ", month " + month + ", day " + day + " already had data. See the logs for comparison.");
printjson(other_month['days'][day]);
printjson(me_month['days'][day]);
throw new Error("Replicated data found. shop_zipcode " + key + ", category " + category + ", month " + month + ", day " + day + " already had data. See the logs for comparison.");
}
}
// Then, cubes.
processCubes(other_month, me_month, category, month, 'none');
// Then, merge month/total.
// print("Calling total from category/month/total/total");
mergeTotalTotal(other_month['total'], me_month['total']);
}
// Then, merge weeks
for (var week in me_category['weeks']) {
// If the week doesn't exist in reduced_value, copy it
if (other_category['weeks'][week] == undefined) {
other_category['weeks'][week] = deepcopy(me_category['weeks'][week]);
continue;
}
// Otherwise, merge.
var me_week = me_category['weeks'][week];
var other_week = other_category['weeks'][week];
// Only cubes are available here.
processCubes(other_week, me_week, category, 'none', week);
// TODO: totals
}
// Then, merge category/total:
// print("Calling days from category/total/days");
mergeTotalDays(other_category['total']['days'], me_category['total']['days']);
// print("Calling total from category/total/total");
mergeTotalTotal(other_category['total']['total'], me_category['total']['total']);
});
// Then, merge total
// print("Calling days from total");
mergeTotalDays(reduced_value['total']['days'], value['total']['days']);
// print("Calling total from total");
mergeTotalTotal(reduced_value['total']['total'], value['total']['total']);
// merge weeks summary
for (var week in value['total']['weeks']) {
if (reduced_value['total']['weeks'][week] == undefined) {
reduced_value['total']['weeks'][week] = deepcopy(value['total']['weeks'][week]);
} else {
mergeCubes(reduced_value['total']['weeks'][week]['cubes'], value['total']['weeks'][week]['cubes']);
mergeCubeTotals(reduced_value['total']['weeks'][week]['total'], value['total']['weeks'][week]['total']);
mergeBasicTotal(reduced_value['total']['weeks'][week]['summary'], value['total']['weeks'][week]['summary']);
}
};
});
return reduced_value;
}""" % dict( CATEGORIES = categories, SHARED = shared_code ))
# dummy_reduce = Code("""function(key, values) { return values[0] }""")
# reduce_func = dummy_reduce
# print map_patterns_func
# print reduce_func
nonAtomic = False
PATTERNS = True
CUBE_MONTH = True
CUBE_WEEK = True
if PATTERNS:
print "Procesando patterns_month"
db.patterns_month.map_reduce(
map_patterns_func,
reduce_func,
out=SON([("reduce", "shop_zipcode_summary"), ('nonAtomic', nonAtomic)]))
if CUBE_MONTH:
print "Procesando cube_month"
db.cube_month.map_reduce(
map_cube_month_func,
reduce_func,
out=SON([("reduce", "shop_zipcode_summary"), ('nonAtomic', nonAtomic)]))
if CUBE_WEEK:
print "Procesando cube_week"
db.cube_week.map_reduce(
map_cube_week_func,
reduce_func,
out=SON([("reduce", "shop_zipcode_summary"), ('nonAtomic', nonAtomic)]))
print "Hecho"
if SHOP_ZIPCODE_SUMMARY:
print "Creating shop zipcode summary"
shop_zipcode_summary()
| UTF-8 | Python | false | false | 46,439 | py | 4,764 | mongo_import.py | 23 | 0.442645 | 0.438511 | 0 | 1,114 | 40.685817 | 261 |
xiepeiheng/Visualization-of-machine-maintenance-data | 13,322,988,581,719 | 80b2827dc7b2d9e545226b50d4eeca45ecc0a9e9 | cb7edf68504f88cee8313d1ebc12190528242ae5 | /Visualization of machine maintenance data/view/person/parse.py | 828bd1c2a3aa6a2e7037fb638383917afdc90dab | [
"MIT"
]
| permissive | https://github.com/xiepeiheng/Visualization-of-machine-maintenance-data | 894d292e25ca46dceae6e004ba72452366713c7c | 02bd235f21d2fe429c701929a3938fbc15385272 | refs/heads/main | 2023-08-16T14:45:35.131025 | 2021-10-20T15:19:19 | 2021-10-20T15:19:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, render_template, request
from function.sqlite_function import *
import sqlite3
from settings import DATABASE_PATH
_parse = Blueprint('parse', __name__, url_prefix='/')
@_parse.route('/parse', defaults={'time': '2021-06-292021-07-29'})
@_parse.route('/parse/<time>')
def parse1(time):
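    # <time> is two ISO dates concatenated (YYYY-MM-DDYYYY-MM-DD); split it into start and end.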
start_time = time[0:10]
stop_time = time[10:]
table = zfh(start_time, stop_time)
time = [start_time, stop_time]
return render_template('down_and_fault/parse/template_parse.html', time=time, table=table)
@_parse.route('/parse/ajax', methods=['POST'])
def parse2():
start_time = request.form['start']
stop_time = request.form['stop']
table = zfh(start_time, stop_time)
return render_template('down_and_fault/parse/parse.html', table=table)
def zfh(start_time, stop_time):
con, cur = db_open()
    # Date-range restriction shared by the queries below
hxy_r = f'''日期 >= "{start_time}" and 日期 <= "{stop_time}"'''
    # Distinct dates in the range (x-axis values)
time = sqliteObject_to_list_a(cur, f'''
select distinct 日期 from parse where {hxy_r}
''')
    # Distinct machine units in the range
crew = sqliteObject_to_list_a(cur, f'''
select distinct 机组 from parse where {hxy_r}
''')
    # Table columns, in order: unit, yield rate, tons per worker, power per ton, output per running hour, spare-parts cost per ton
table = sqliteObject_to_list_h(cur, f'''
select 机组,ifnull(ROUND(sum(正品)/sum(原料),2),''),ifnull(ROUND(sum(正品)/sum(人数),2),''),ifnull(ROUND(sum(耗电)/sum(正品),2),''),ifnull(ROUND(sum(正品)/sum(开机),2),''),ifnull(ROUND(sum(备件金额)/sum(正品),2),'')
from parse2
where {hxy_r}
GROUP BY 机组
''')
    # # Chart order: tons per worker, power per ton, output per running hour; spare parts and yield rate get no trend chart -- read their totals at the top instead
    # # Chart title text; also used as the search key
# pic_name = ['人均吨钢', '吨电耗', '单位产量']
# for i in pic_name:
# temp = sqliteObject_to_list_h(cur, f'''
# select 机组,{i}
# from parse1
# where {hxy_r}
# GROUP BY 机组
# ''')
#
#
#
#
#
# hxy1 = sqliteObject_to_list_h(cur, f'''
# select 机组,ROUND(sum(人均吨钢),2),ROUND(sum(吨电耗),2),ROUND(sum(单位产量),2),ROUND(sum(吨备件),2)
# from parse1
# where {hxy_r}
# GROUP BY 机组
# ''')
#
# hxy2 = sqliteObject_to_list_a(cur, f'''
# select 机组,ROUND(sum(正品)/sum(原料),2)
# from parse
# where {hxy_r}
# GROUP BY 机组
# ''')
    # Daily-trend section
db_close(con, cur)
return table
| UTF-8 | Python | false | false | 2,697 | py | 37 | parse.py | 17 | 0.574082 | 0.557235 | 0 | 88 | 25.306818 | 199 |
xenomarz/advanced-topics-in-deep-learning-236605-hw1 | 11,252,814,356,323 | 221e494744f3510ac9cc7a5a7f5dbc66ce85d58c | 86b9d66b4bae1098f99c1673e33af05bf3d3b3cc | /train_feature_extractor.py | c3514154dad1dddf86c7eeaf477b75fde6d3a4a1 | []
| no_license | https://github.com/xenomarz/advanced-topics-in-deep-learning-236605-hw1 | c5f33601f4556ca8bd650132a70b42812073ed22 | f5af0cd23da666bab391de7aee1e9baf00cdda74 | refs/heads/main | 2023-05-07T19:17:10.330984 | 2021-06-04T22:23:50 | 2021-06-04T22:23:50 | 372,843,482 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from torchvision.datasets.utils import download_url
import os
import sys
import tarfile
import hashlib
import torchvision
import torch
from torchvision.transforms import transforms
import numpy as np
import matplotlib.pyplot as plt
from augmentation import TwoCropsTransform
from moco import KeyEncoder
from moco import QueryEncoder
from moco import LinearClassifier
from datetime import datetime
from pathlib import Path
from moco import MoCo
from logger import Logger
# https://github.com/fastai/imagenette
dataset_url = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz'
dataset_filename = dataset_url.split('/')[-1]
dataset_foldername = dataset_filename.split('.')[0]
data_path = './data'
results_base_path = './results'
dataset_filepath = os.path.join(data_path,dataset_filename)
dataset_folderpath = os.path.join(data_path,dataset_foldername)
if __name__ == '__main__':
#####################################
# Download Dataset
#####################################
# https://github.com/fastai/imagenette
dataset_url = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz'
dataset_filename = dataset_url.split('/')[-1]
dataset_foldername = dataset_filename.split('.')[0]
data_path = './data'
dataset_filepath = os.path.join(data_path, dataset_filename)
dataset_folderpath = os.path.join(data_path, dataset_foldername)
os.makedirs(data_path, exist_ok=True)
download = False
if not os.path.exists(dataset_filepath):
download = True
else:
md5_hash = hashlib.md5()
file = open(dataset_filepath, "rb")
content = file.read()
md5_hash.update(content)
digest = md5_hash.hexdigest()
if digest != 'fe2fc210e6bb7c5664d602c3cd71e612':
download = True
if download:
download_url(dataset_url, data_path)
with tarfile.open(dataset_filepath, 'r:gz') as tar:
tar.extractall(path=data_path)
#####################################
# Create DataLoader
#####################################
size = 224
ks = (int(0.1 * size) // 2) * 2 + 1 # should be odd
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
train_transform_features_extractor = TwoCropsTransform(transforms.Compose([
transforms.RandomResizedCrop(scale=(0.2, 1), size=size),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([transforms.GaussianBlur(kernel_size=ks)]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**__imagenet_stats)]))
dataset_train_features_extractor = torchvision.datasets.ImageFolder(os.path.join(dataset_folderpath, 'train'), train_transform_features_extractor)
# settings
t = 0.07
feature_extractor_train_epochs = 2000
feature_extractor_train_batch_size = 220
classifier_train_epochs = 5
momentum = 0.9
weight_decay = 1e-4
lr = 1e-4
k = 10000
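    # Assumed roles of the two MoCo hyper-parameters above (not documented here): t is the
    # softmax temperature applied to the contrastive logits, k the length of the negative-key queue.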
checkpoint_granularity = 50
train_dataloader_features_extractor = torch.utils.data.DataLoader(
dataset_train_features_extractor,
batch_size=feature_extractor_train_batch_size,
num_workers=20,
drop_last=True,
shuffle=True,
pin_memory=True,
prefetch_factor=20)
#####################################
# Train
#####################################
moco = MoCo(k=k).cuda()
moco.train()
loss_features_extractor = torch.nn.CrossEntropyLoss().cuda()
optimizer_features_extractor = torch.optim.Adam(
moco.parameters(),
lr=lr)
results_dir_path = os.path.normpath(os.path.join(results_base_path, datetime.now().strftime('%Y-%m-%d-%H-%M-%S')))
Path(results_dir_path).mkdir(parents=True, exist_ok=True)
results_filepath = os.path.normpath(os.path.join(results_dir_path, 'feature_extractor_loss_results.npy'))
sys.stdout = Logger(filepath=os.path.join(results_dir_path, 'feature_extractor_training.log'))
feature_extractor_train_loss_array = np.array([])
for epoch_index in range(feature_extractor_train_epochs):
# --------------------------------------
# Train features extractor for one epoch
# --------------------------------------
feature_extractor_batch_loss_array = np.array([])
feature_extractor_epoch_loss = 0
print(f'Feature Extractor Epoch #{epoch_index + 1}')
print('------------------------------------------')
for batch_index, ((queries, keys), _) in enumerate(train_dataloader_features_extractor):
logits, labels, queries_features, keys_features = moco.forward(queries=queries.cuda(), keys=keys.cuda())
loss_output = loss_features_extractor(logits / t, labels)
optimizer_features_extractor.zero_grad()
loss_output.backward()
optimizer_features_extractor.step()
moco.update_key_encoder(keys=keys_features)
loss_item = loss_output.item()
feature_extractor_batch_loss_array = np.append(feature_extractor_batch_loss_array, [loss_item])
feature_extractor_epoch_loss = np.mean(feature_extractor_batch_loss_array)
print(f'Epoch: #{(epoch_index + 1):{" "}{"<"}{5}}| Batch: #{(batch_index + 1):{" "}{"<"}{5}}| Batch Loss: {loss_output.item():{" "}{"<"}{30}}| Epoch Loss: {feature_extractor_epoch_loss:{" "}{"<"}{30}}')
print('')
feature_extractor_train_loss_array = np.append(feature_extractor_train_loss_array, [feature_extractor_epoch_loss])
if (epoch_index + 1) % checkpoint_granularity == 0:
results = {
'feature_extractor_train_loss_array': feature_extractor_train_loss_array,
'feature_extractor_train_epochs': feature_extractor_train_epochs,
'feature_extractor_train_batch_size': feature_extractor_train_batch_size
}
np.save(file=results_filepath, arr=results, allow_pickle=True)
lastest_moco_filepath = os.path.normpath(os.path.join(results_dir_path, f'moco_{epoch_index + 1}.pt'))
torch.save(moco.state_dict(), lastest_moco_filepath)
| UTF-8 | Python | false | false | 6,305 | py | 10 | train_feature_extractor.py | 6 | 0.618715 | 0.599841 | 0 | 153 | 40.20915 | 214 |
ParamitaDhyana/python-demo | 7,335,804,178,100 | b35712ce412c2a9e0d7f37f2d83ee555577ac5c9 | e18508d7b29ca29537198fe86e39fd445426a28b | /20181017/io/PickleDemo.py | 343b5a8e413aa746f75c32d483877943ed60865f | []
| no_license | https://github.com/ParamitaDhyana/python-demo | e417bed4f2e95588dbee5957b926e14ff0c9d427 | 1e7d7f1cad449a7b1c88a61cf85d3db6387feb6a | refs/heads/master | 2020-03-18T14:28:05.496021 | 2018-11-07T04:29:08 | 2018-11-07T04:29:08 | 134,849,555 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pickle
import os
import json
d = dict(name='Bob', age=20, score=99)
# print(pickle.dumps(d))
# with open('c:/GitHub/python-demo/20181017/io/dump.txt', 'wb') as f:
# pickle.dump(d, f)
# print(json.dumps(d))
# json_str = '{"age":20,"name":"daniel","score":100}'
# j =json.loads(json_str)
# print(j['age'])
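# json.dumps cannot serialize arbitrary objects; the `default` hook below converts a
# Student instance to a plain dict before encoding.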
class Student(object):
def __init__(self,name,age,score):
self.name=name
self.age=age
self.score=score
def student2dict(std):
return {
'name':std.name,
'age':std.age,
'score':std.score
}
s=Student('小明',20,69)
print(json.dumps(s,default=student2dict)) | UTF-8 | Python | false | false | 640 | py | 59 | PickleDemo.py | 51 | 0.602201 | 0.566038 | 0 | 31 | 19.548387 | 69 |
ryan-way-leetcode/python | 13,022,340,846,249 | af5912ca997e82f990a9dee72e18417817dfada0 | 55d8050c64c95af7315ac2585e745ecd485ab125 | /242_ValidAnagram/solution.py | c7c16fbcb1b0a7abff39978159dc77d142d9ae74 | []
| no_license | https://github.com/ryan-way-leetcode/python | bf6db483048be95b85d347401917fe2335618054 | aa210e56d9953ce2c4084ec7bfc953e52efcb8f1 | refs/heads/main | 2023-08-18T21:19:39.693588 | 2021-10-19T19:50:26 | 2021-10-19T19:50:26 | 419,070,851 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def isAnagram(self, s: str, t: str) -> bool:
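        # Count character frequencies of both strings; they are anagrams iff the two maps are equal.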
d_s = {}
d_t = {}
for c in s:
if c not in d_s:
d_s[c] = 0
d_s[c] += 1
for c in t:
if c not in d_t:
d_t[c] = 0
d_t[c] += 1
return d_t == d_s | UTF-8 | Python | false | false | 327 | py | 19 | solution.py | 19 | 0.321101 | 0.308869 | 0 | 16 | 19.5 | 48 |
denotepython/pythonbook | 13,254,269,118,451 | f9a92e78ee81153d7b998fb9877c80ce64d885ba | 1688cf63a2513238f3afdccdbb080aa0cca4004e | /learn python without code knowledge-- Craig Richardson/冒险一:太空飞船.py | 45ebea97dd038eb3d5ca5832498c3e95ac8bc0ce | []
| no_license | https://github.com/denotepython/pythonbook | 34910dd1379e5b5b9dce07c2126d02363af1ac0a | 885179a8ca67262c4db7fcb9b2673a3313a3ec92 | refs/heads/master | 2021-01-22T03:39:21.549355 | 2016-07-31T07:30:25 | 2016-07-31T07:30:25 | 58,024,965 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #raw_input()函数:使用户在提示符后面输入字符串
import time
shipname = "LQ_ship"
captain = "LQ"
location = "earth"
password = "1234"
password_temp = raw_input("Enter the password: ")
while password_temp != password:
print "password wrong"
password_temp = raw_input("Enter the password: ")
print "password right, weclome to the " + shipname
print ""
print "The spaceship " + shipname + " is visiting " + location
choice = ""
while choice != "/exit":
print "What would you like to do?"
print ""
print "a. Travel to another planet"
print "b. Fire"
print "c. self-destory"
print "input /exit to exit"
print ""
choice = raw_input("Enter your choice ")
if choice == "a":
destination = raw_input("Where do you want to go?")
print "leave " + location
print "travlling to " + destination
time.sleep(5)
print "arrived at " + destination
location = destination
elif choice == "b":
print "Fire"
time.sleep(3)
elif choice == "c":
confirm = raw_input("Are you sure to destory the space?")
if confirm == "y":
print "ship will self-destory in 3"
time.sleep(1)
print "2"
time.sleep(1)
print "1"
time.sleep(1)
print "Boom!"
else:
print "invalid input.please select a, b, or c." | UTF-8 | Python | false | false | 1,235 | py | 57 | 冒险一:太空飞船.py | 56 | 0.662781 | 0.652789 | 0 | 48 | 24.041667 | 62 |
AloneRoad/ElasticLog | 15,874,199,141,755 | 3b574b4abd35c73f3d4bf4f4bdb4535888433e5f | 46a23a57a6e94a588b9f211e672390717cdf65a4 | /src/web.py | c285fa1e15b353810320f5deba0a335f950a5395 | []
| no_license | https://github.com/AloneRoad/ElasticLog | 00c671dc1d6dc293ac4225f158571af783331da2 | 7c3a750a1cd18356464f0752488a51cc5f026e26 | refs/heads/master | 2021-01-22T11:20:25.702760 | 2012-04-04T18:20:40 | 2012-04-04T18:20:40 | 3,931,705 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! coding: utf-8
# pylint: disable-msg=W0311
"""
- Display the interface:
 + a query input box
 + a chart area
 + an area listing the matched results
- Handle a query:
 + search it in ElasticSearch
 + aggregate the results and chart them over time or over the parameters extracted during the parsing step
"""
from flask import Flask, request, render_template, make_response, abort
from pyelasticsearch import ElasticSearch
from mimetypes import guess_type
from datetime import datetime, timedelta
from redis import Redis
import os
import settings
try:
from collections import OrderedDict as odict
except ImportError:
from ordereddict import OrderedDict as odict
host, port = settings.CACHE.split(':')
CACHE = Redis(host=host, port=int(port))
INDEX = ElasticSearch('http://' + settings.ES_SERVERS[0])
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def main():
if request.method == 'GET':
query = '*'
graph_by = 'minute'
start = datetime.strftime(datetime.now() - timedelta(hours=1), '%Y-%m-%dT%H:%M:%S')
end = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')
else:
query = request.form.get('query', '*')
if '*' not in query:
query = '*' + query + '*'
graph_by = request.form.get('graph_by', 'second')
start = request.form.get('start')
end = request.form.get('end')
key = graph_by
query_dsl = {}
query_dsl['query'] = {
"filtered" : {
"query" : {
"query_string" : {"query" : query}
},
"filter" : {
"numeric_range" : {
"time" : {
"lt" : end,
"gte" : start
}
}
}
}
}
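  # Bucket the hits with a date_histogram facet when grouping by a time unit; otherwise use a terms facet on the chosen field.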
if key in ['second', 'minute', 'hour', 'day', 'week', 'month', 'year']:
query_dsl['facets'] = {
"counters": {
"date_histogram": {
"field" : "time",
"interval" : key
}
}
}
else:
query_dsl["facets"] = {
"counters" : {
"terms" : {
"script_field" : "_source.%s" % key
}
}
}
resp = INDEX.search(query=None, body=query_dsl, indexes=['log'])
results = resp.get('hits').get('hits')
records = []
for i in results:
record = {}
for k in i.get('_source').keys():
if not k.startswith('_'):
record[k] = i['_source'][k]
records.append(record)
counters = odict()
delta = timedelta(hours=7) # utc offset
if key in ['second', 'minute', 'hour', 'day', 'week', 'month', 'year']:
entries = resp.get('facets').get('counters').get('entries')
for i in entries:
counters[datetime.fromtimestamp(i['time'] / 1000) - delta] = i['count']
else:
for i in resp.get('facets').get('counters').get('terms'):
counters[i['term']] = i['count']
return render_template('home.html',
start=start, end=end,
query=query, graph_by=graph_by,
records=records, counters=counters)
@app.route('/public/<filename>')
def public_files(filename):
path = 'public/' + filename
if not os.path.exists(path):
abort(404, 'File not found')
filedata = open(path).read()
response = make_response(filedata)
response.headers['Content-Length'] = len(filedata)
response.headers['Content-Type'] = guess_type(filename)[0]
response.headers['Cache-Control'] = 'public'
response.headers['Expires'] = '31 December 2037 23:59:59 GMT'
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False) | UTF-8 | Python | false | false | 3,837 | py | 11 | web.py | 10 | 0.528352 | 0.519078 | 0 | 132 | 27.598485 | 89 |
Umbrella-Digital/DataStream_Cookbook | 12,386,685,682,300 | 5b5cdd0dd8d430ec1bdd49364380105939c45789 | ee67ab08aac3a9c10d0e0c87888f46a1899be71a | /Recipes/lib/lotame.properties | e5c82a23a33cdc352d98755cc6c79bbf61c1bacc | [
"MIT"
]
| permissive | https://github.com/Umbrella-Digital/DataStream_Cookbook | f7e2474d741aaa5e41999ca04164dafced289fcc | feceb9cffd87696f3bcd0e24a66a5b5c7c244617 | refs/heads/master | 2022-11-15T22:33:21.678522 | 2020-07-07T17:36:54 | 2020-07-07T17:36:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#
# Filename:
#
# lotame.properties
#
#
# Basic Usage:
#
# 1. populate the following fields:
# username
# password
# client_id
#
#
[default]
auth_url=https://crowdcontrol.lotame.com/auth/v1/tickets
base_url=https://api.lotame.com/2
username=<username>
password=<password>
client_id=<client_id> | UTF-8 | Python | false | false | 353 | properties | 1 | lotame.properties | 1 | 0.631728 | 0.623229 | 0 | 24 | 13.75 | 56 |
rakesht2499/ip_model | 77,309,446,584 | 4510c090e44bc650fe1d07ae5056fe89fde64bc8 | e040bdc04bf124cfcd693b3add2786d9137d7367 | /Ip.py | 3028acac6e08ea4882200497cf49713e0cd4d67c | [
"Apache-2.0",
"Python-2.0"
]
| permissive | https://github.com/rakesht2499/ip_model | feea51ad8562e9e6614d555cecb39732937c0a84 | 951bef164fed2ae9142bff0f9f5b98b45e0fc2a1 | refs/heads/master | 2022-09-21T16:50:25.327634 | 2020-06-06T14:35:06 | 2020-06-06T16:24:32 | 251,326,314 | 5 | 0 | Apache-2.0 | false | 2020-06-03T22:19:12 | 2020-03-30T14:14:17 | 2020-06-03T18:51:09 | 2020-06-03T22:19:11 | 2,380 | 6 | 0 | 0 | Python | false | false | from ipaddress import ip_address, IPv4Address, IPv4Network, ip_network
from ip_model.IpObject import IpObject
from ip_model.Ipv4 import Ipv4
from ip_model.Ipv6 import Ipv6
class Ip(IpObject):
def __init__(self):
self.ipv4 = Ipv4()
self.ipv6 = Ipv6()
def __ip_decider(self, ip):
if isinstance(ip_address(ip), IPv4Address):
return self.ipv4
else:
return self.ipv6
def __cidr_decider(self, ip):
if isinstance(ip_network(ip), IPv4Network):
return self.ipv4
else:
return self.ipv6
def add(self, ip: str):
"""
Adds the given valid IP
:param ip: An Ip as String
:returns: True for every new addition
:raises :class:`ip_model.Exceptions.InvalidIpException` for invalid IP
"""
ipObject = self.__ip_decider(ip)
return ipObject.add(ip)
def remove(self, ip: str):
"""
Removed the given valid IP
:param ip: An Ip as String
:returns: str: The IP which is removed
:raises :class:`ip_model.Exceptions.InvalidIpException` for invalid IP
"""
ipObject = self.__ip_decider(ip)
return ipObject.remove(ip)
def is_present(self, ip: str):
"""
Checks if a given valid IP is present or not
:param ip: An Ip as String
:returns: True, if the element is present
False, if the element is not present
:raises :class:`ip_model.Exceptions.InvalidIpException` for invalid IP
"""
ipObject = self.__ip_decider(ip)
return ipObject.is_present(ip)
def add_cidr(self, cidr: str):
"""
Adds the given valid IP CIDR
:param cidr: String in CIDR notation
:return: True
:raises :class:`ip_model.Exceptions.InvalidIpException` for invalid IP CIDR
"""
ipObject = self.__cidr_decider(cidr)
return ipObject.add_cidr(cidr)
def remove_cidr(self, cidr: str):
"""
Removes the given valid IP CIDR
:param cidr: String in CIDR notation
:return: str: CIDR range which is removed
:raises :class:`ip_model.Exceptions.InvalidIpException` for invalid IP CIDR
"""
ipObject = self.__cidr_decider(cidr)
return ipObject.remove_cidr(cidr)
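# Illustrative usage (assumes the package's Ipv4/Ipv6 back-ends are importable):
#   ips = Ip()
#   ips.add("192.168.0.1")        # True on first addition
#   ips.add_cidr("10.0.0.0/24")   # marks the whole block as present
#   ips.is_present("10.0.0.17")   # True
#   ips.remove("192.168.0.1")     # returns the removed address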
| UTF-8 | Python | false | false | 2,357 | py | 19 | Ip.py | 17 | 0.595248 | 0.58846 | 0 | 78 | 29.217949 | 84 |
bituka/e-eleksyon | 1,743,756,757,702 | e964be86e85fd852ce2fd924a825d40c098a818f | b0856bad0d28d74eac160bea58aabe37fe16f941 | /backend/vote/serializers.py | b86cc228a67b3bf87be8056519a9279ac2c2afc0 | []
| no_license | https://github.com/bituka/e-eleksyon | 90ab025c5aa5ec74f18776374e8376933216d26a | ecd494df83ecdfdd134c69213841051138ea32a3 | refs/heads/master | 2023-06-28T03:15:14.638512 | 2021-07-19T00:36:54 | 2021-07-19T00:36:54 | 364,792,939 | 0 | 3 | null | false | 2021-07-26T11:49:45 | 2021-05-06T05:17:50 | 2021-07-19T00:37:21 | 2021-07-20T05:59:44 | 3,264 | 0 | 1 | 0 | Vue | false | false | from rest_framework import serializers
from vote.models import Position, Candidate
class CandidateSerializer(serializers.ModelSerializer):
class Meta:
model = Candidate
fields = ['name', 'position', 'votes']
class PositionSerializer(serializers.ModelSerializer):
class Meta:
model = Position
fields = ['title'] | UTF-8 | Python | false | false | 352 | py | 15 | serializers.py | 4 | 0.707386 | 0.707386 | 0 | 12 | 28.416667 | 55 |
Developing-Community/Developing-Community-Web | 17,892,833,756,271 | 4eb3a92c9bd1f3edd898bcb62755fef1040a447b | c63baadbefa9ff9800553487c4e3a0c125f13b72 | /users/urls.py | b1be075cdd2f27a8bb8f133dccb52cf2debf8734 | [
"MIT"
]
| permissive | https://github.com/Developing-Community/Developing-Community-Web | 306245eafb857ee13c9f8e71ad38a23139681a03 | 733233da20148ae1affc33067c332f3619daa176 | refs/heads/master | 2018-09-17T05:39:15.940850 | 2018-09-15T10:27:25 | 2018-09-15T10:27:25 | 135,051,348 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import (
UserCreateAPIView,
ProfileUpdateAPIView,
ProfileRetrieveAPIView)
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token
urlpatterns = [
path('auth/refresh_token/', refresh_jwt_token),
path('auth/verify_token/', verify_jwt_token),
path('auth/obtain_token/', obtain_jwt_token),
path('register/', UserCreateAPIView.as_view(), name='register'),
path('profile/', ProfileRetrieveAPIView.as_view(), name='view-profile'),
path('profile/<int:id>/update/', ProfileUpdateAPIView.as_view(), name='edit-profile'),
]
| UTF-8 | Python | false | false | 627 | py | 44 | urls.py | 43 | 0.714514 | 0.714514 | 0 | 16 | 38.1875 | 90 |
claudiospro/neointel_proyecto01 | 6,528,350,307,297 | 42310db0a9991ed30343166066fd224a460ccd57 | e5381fbb04c325818abc44d7c97ad0792c380014 | /docs/analisis/muestra02/proceso.py | 5e782e41272bc9667214138aa4e091335701938b | []
| no_license | https://github.com/claudiospro/neointel_proyecto01 | 4b64d0a77d5c39fd11fc499e4a70b3b3468b0f1b | f6f13f038b7b759511958885eb2e3681220b0980 | refs/heads/master | 2016-08-11T16:36:49.915964 | 2016-01-08T21:43:10 | 2016-01-08T21:43:10 | 49,088,342 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import re
import time
from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.cell import column_index_from_string
import config.cols
cols = config.cols.huecos
wb10 = load_workbook(sys.argv[1])
ws11 = wb10['X']
wb20 = Workbook()
ws21 = wb20.active
ws21['A1'] = 'municipio'
ws21['B1'] = 'direccion'
list_21 = list()
i = j = 1
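# i tracks the source row in ws11, j the next free row in ws21; rows whose
# municipio+direccion combination has already been seen are skipped (de-duplication).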
# print ws11.max_row
for row in ws11.rows:
if i == 1:
i+=1
j+=1
continue
index_21 = ''
municipio1 = ws11.cell(row = i, column = cols['municipio'])
direcion1 = ws11.cell(row = i, column = cols['direccion'])
if (municipio1.value is not None and direcion1.value is not None):
index_21+=municipio1.value
index_21+=direcion1.value
# print index_21
        if index_21 not in list_21:
list_21.append(index_21)
municipio2 = ws21.cell(row = j, column = 1)
direcion2 = ws21.cell(row = j, column = 2)
municipio2.value = municipio1.value
municipio2.value = municipio2.value.decode('utf-8')
print type(municipio2.value)
direcion2.value = direcion1.value
print type(direcion2.value)
j+=1
i+=1
wb20.save(filename = 'procesado_calle_'+sys.argv[1])
| UTF-8 | Python | false | false | 1,315 | py | 97 | proceso.py | 85 | 0.601521 | 0.542966 | 0 | 53 | 23.811321 | 70 |
BertrandBordage/Titelouze | 6,219,112,656,622 | 1d34db9c0c965eacacd0a60287f25fcd0bd39d12 | 7f7f09dc32c5e8bac7cedd9b56782f882e671fee | /setup.py | 56c7ba0b4db2c5267f13ea642903163696c55633 | []
| no_license | https://github.com/BertrandBordage/Titelouze | 5509f858ccc7daf0bec0bac5cae50d0b385dc672 | 178b8e26d16a38993d0d252e1de2300f6513bec2 | refs/heads/master | 2021-01-21T09:59:25.304895 | 2013-04-17T15:54:44 | 2013-04-17T15:54:44 | 3,545,692 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
# coding: utf-8
from distutils.core import setup
setup(
name='Titelouze',
version='0.1.2',
author='Bertrand Bordage',
author_email='bordage.bertrand@gmail.com',
url='https://github.com/BertrandBordage/Titelouze',
packages=('titelouze',),
package_data={'titelouze': ['templates/*.ily']},
license='Creative Commons Attribution Non-Commercial Share Alike',
description='Framework to easily build LilyPond books.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: Free for non-commercial use',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Artistic Software',
],
)
| UTF-8 | Python | false | false | 859 | py | 18 | setup.py | 9 | 0.6461 | 0.640279 | 0 | 26 | 32.038462 | 70 |
eahmedali2005/GTM-DSM2 | 4,054,449,160,348 | 1a1fd7667b1d83fe79d5a5a42d0914663e02162a | 404d14312f008f9648b51b16dcca9716ab865d36 | /input_storage/component.py | 471cac379cc06710f1963b312c0c1938304f0b50 | []
| no_license | https://github.com/eahmedali2005/GTM-DSM2 | 898bee7019d800a8f70f0944fb8b58815c71bcc9 | 6aa949eed7ca44d3cef2847c082cb52d0cb18d90 | refs/heads/master | 2020-03-27T09:28:45.469432 | 2018-08-27T19:34:13 | 2018-08-27T19:34:13 | 146,344,871 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import string
def component_order():
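    # Canonical ordering of the component tables; used by the 'order' and 'members'
    # CLI listings and by generateNotepad() below.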
return["envvar",\
"scalar",\
"channel",\
"xsect",\
"xsect_layer",\
"reservoir",\
"reservoir_connection",\
"gate",\
"gate_pipe_device",\
"gate_weir_device",\
"transfer",\
"io_file",\
"tidefile",\
"group",\
"group_member",\
"channel_ic",\
"reservoir_ic",\
"operating_rule",\
"oprule_expression",\
"oprule_time_series",\
"rate_coefficient",\
"group_variable",\
"group_variable_sed",\
"particle_insertion",\
"particle_filter",\
"particle_res_filter",\
"particle_flux_output",\
"particle_group_output",\
"input_climate",\
"input_transfer_flow",\
"input_gate",\
"boundary_stage",\
"boundary_flow",\
"source_flow",\
"source_flow_reservoir",\
"node_concentration",\
"reservoir_concentration",\
"input_time_series",\
"output_channel",\
"output_reservoir",\
"output_channel_source_track",\
"output_reservoir_source_track",\
"output_gate",\
"suspended_sediment_type",\
"suspended_sediment_boundary"]
def component_members():
return {"envvar":["name","value"],\
"scalar":["name","value"],\
"channel":["chan_no","length","manning","dispersion","upnode","downnode"],\
"xsect":["chan_no","dist","file"],\
"xsect_layer":["chan_no","dist","elev","area","width","wet_perim"],\
"reservoir":["name","area","bot_elev"],\
"reservoir_connection":["res_name","node","coef_in","coef_out"],\
"gate":["name","from_obj","from_identifier","to_node"],\
"gate_pipe_device":["gate_name","device","nduplicate","radius","elev","cf_from_node","cf_to_node","default_op"],\
"gate_weir_device":["gate_name","device","nduplicate","width","elev","height","cf_from_node","cf_to_node","default_op"],\
"transfer":["name","from_obj","from_identifier","to_obj","to_identifier"],\
"io_file":["model","type","io","interval","file"],\
"tidefile":["start_date","end_date","file"],\
"group":["name"],\
"group_member":["group_name","member_type","pattern"],\
"channel_ic":["chan_no","distance","stage","flow"],\
"reservoir_ic":["res_name","stage"],\
"operating_rule":["name","action","trigger"],\
"oprule_expression":["name","definition"],\
"oprule_time_series":["name","fillin","file","path"],\
"rate_coefficient":["group_name","constituent","variable","value"],\
"group_variable":["group_name","constituent","variable","value"],\
"group_variable_sed":["group_name","sed_zone","sed_layer","variable","value"],\
"particle_insertion":["node","nparts","delay","duration"],\
"particle_filter":["name","node","at_wb","fillin","file","path"],\
"particle_res_filter":["name","res_name","at_wb","fillin","file","path"],\
"particle_flux_output":["name","from_wb","to_wb","interval","file"],\
"particle_group_output":["name","group_name","interval","file"],\
"input_climate":["name","variable","fillin","file","path"],\
"input_transfer_flow":["transfer_name","fillin","file","path"],\
"input_gate":["gate_name","device","variable","fillin","file","path"],\
"boundary_stage":["name","node","fillin","file","path"],\
"boundary_flow":["name","node","sign","fillin","file","path"],\
"source_flow":["name","node","sign","fillin","file","path"],\
"source_flow_reservoir":["name","res_name","sign","fillin","file","path"],\
"node_concentration":["name","node_no","variable","fillin","file","path"],\
"reservoir_concentration":["name","res_name","variable","fillin","file","path"],\
"input_time_series":["group_name","variable","fillin","file","path"],\
"output_channel":["name","chan_no","distance","variable","interval","period_op","file"],\
"output_reservoir":["name","res_name","node","variable","interval","period_op","file"],\
"output_channel_source_track":["name","chan_no","distance","variable","source_group","interval","period_op","file"],\
"output_reservoir_source_track":["name","res_name","variable","source_group","interval","period_op","file"],\
"output_gate":["name","gate_name","device","variable","interval","period_op","file"],\
"suspended_sediment_type":["composition"],\
"suspended_sediment_boundary":["name","composition","percent"]}
def include_block():
return {\
"reservoir":"hydro_time_series",
"particle_flux_output":"particle",
"xsect":"grid",
"output_gate":"output_time_series",
"scalar":"parameter",
"boundary_stage":"hydro_time_series",
"node_concentration":"gtm_time_series",
"gate_pipe_device":"hydro_time_series",
"xsect_layer":"grid",
"group_variable":"gtm_spatial",
"group_member":"groups",
"oprule_expression":"operation",
"input_transfer_flow":"hydro_time_series",
"group":"groups",
"output_channel":"output_time_series",
"particle_group_output":"particle",
"transfer":"hydro_time_series",
"input_climate":"gtm_time_series",
"oprule_time_series":"operation",
"particle_insertion":"particle",
"channel":"grid",
"boundary_flow":"hydro_time_series",
"input_gate":"hydro_time_series",
"operating_rule":"operation",
"reservoir_concentration":"gtm_time_series",
"reservoir_ic":"initial_condition",
"output_channel_source_track":"output_time_series",
"gate":"hydro_time_series",
"output_reservoir_source_track":"output_time_series",
"particle_res_filter":"particle",
"rate_coefficient":"qual_spatial",
"input_time_series":"gtm_time_series",
"reservoir_connection":"hydro_time_series",
"source_flow":"hydro_time_series",
"gate_weir_device":"hydro_time_series",
"particle_filter":"particle",
"source_flow_reservoir":"hydro_time_series",
"channel_ic":"initial_condition",
"envvar":"configuration",
"output_reservoir":"output_time_series"}
def include_block_order():
return[\
"configuration",\
"parameter",\
"grid",\
"initial_condition",\
"hydro_time_series",\
"operation",\
"groups",\
"qual_time_series",\
"qual_spatial",\
"gtm_time_series",\
"gtm_spatial",\
"output_time_series",\
"particle"]
def ordered_print(items):
"""Given a sequence of items, prints them in order with number then item"""
for i,item in zip(range(len(items)),items):
print "%s: %s" % (i,item)
def generateNotepad():
""" Generates a Notepad++ user defined language file with syntax highlights for the keywords """
tablelist=component_order()
includes = set(include_block().values())
folds = string.join([x.upper() for x in tablelist+list(includes)]," ")
member_dict=component_members()
keys=[]
for key in member_dict:
keys+=member_dict[key]
keywords = string.join([key.upper() for key in keys]," ")
userfile = open("userDefineLangTemplate.xml",'r')
usertxt = userfile.read()
userfile.close()
usertxt=usertxt.replace("@FOLDS",folds)
usertxt=usertxt.replace("@KEYS",keywords)
userfile = open("userDefineLang.xml","w")
userfile.write(usertxt)
userfile.close()
if (__name__=="__main__"):
if len(sys.argv) == 1:
print "Usage: component order"
print " or: component members [table]"
else:
arg=sys.argv[1]
if arg == "order":
corder = component_order()
ordered_print(corder)
if arg == "members":
members = component_members()
if len(sys.argv)==2:
for key in component_order():
print "%s" % key
morder = members[key]
ordered_print(morder)
print "\n"
else:
ordered_print(component_members()[sys.argv[2]])
if arg == "notepad":
generateNotepad()
| UTF-8 | Python | false | false | 8,081 | py | 235 | component.py | 29 | 0.582972 | 0.582477 | 0 | 208 | 37.841346 | 127 |
karebearvfx/cspath-datastructures-capstone-project | 13,572,096,674,561 | a137360ebb521839774fca076b2502edfb9e9b22 | 57a6a0750f1015ebda6b7dbe7d47666581982073 | /finalcodes/script.py | 673aa8dfcc9075445e9e37ed043b728113b46065 | []
| no_license | https://github.com/karebearvfx/cspath-datastructures-capstone-project | 2865aeaa69a19f2ac8f42ea12638b322028b73a8 | 4b1b895aba4503534af47743139d62d248dfea5e | refs/heads/master | 2020-04-07T21:58:06.264145 | 2018-11-23T07:49:37 | 2018-11-23T07:49:37 | 158,748,618 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##import functions###
from data import *
from welcome import *
from hashmap import HashMap
from linkedlist import LinkedList
##GOAL:To retrieve a list of restaurants given a single input of what food type user enters
##
####variables###
#copy existing data
food_types = types
restaurants_list = restaurant_data
###functions####
##create a list of dictionary keys for array
##O(N)
def char_keys(lists):
char_keys = []
for item in lists:
if item[0] not in char_keys:
char_keys.append(item[0])
else:
pass
return char_keys
##create a list of values for keys starting with the first letter
#if there is more than one value, it is added to the values list
##O(N)
def find_values(key,value_list):
key_value_list = []
for item in value_list:
if item[0] == key:
key_value_list.append(item)
return key_value_list
##create hashmap {first letter: food_types}, runtime = O (N^2) during map generation
##Runtime = O(1) when searching
food_types_keys = char_keys(food_types)
food_types_hashmap = HashMap(len(food_types_keys))
for i in food_types_keys:
values_list = find_values(i,food_types)
food_types_hashmap.assign(i,values_list)
"""
###TEST CHECK
for i in food_types_keys:
values = food_types_hashmap.retrieve(i)
print("values at {0} are {1}".format(i,values))
"""
##return list of food_types for user_input
def food_type_output(user_input):
return food_types_hashmap.retrieve(user_input)
#search_food_types(input_char)
#takes user_input and result from food_type_output
#check char match at each list position
#return search_food_type
#O(N)runtime
##not working like I want it to....
##assumes input of >1 char
def search_food_types(input_char, food_list):
word_list = food_list
len_list = len(word_list)
list_idx = 0
word_idx = len(input_char)
while len_list > 0:
if word_list[list_idx][0:word_idx] == input_char:
return word_list[list_idx]
break
else:
len_list -= 1
list_idx += 1
return("Option not found, please try again.")
##GENERATE restaurant_data_hashmap, runtime O(N^2)
##runtime for hashmap is O(1)
restaurant_data_hashmap = HashMap( len(food_types) )
for i in food_types:
restaurant_values_list = find_values(i,restaurants_list)
restaurant_data_hashmap.assign(i,restaurant_values_list)
"""
##TEST
for i in food_types:
shop_values = restaurant_data_hashmap.retrieve(i)
print("values at {0} are {1}".format(i,shop_values))
"""
def get_restaurant_data(search_food_types):
return restaurant_data_hashmap.retrieve(search_food_types)
"""
##TEST##
italian = get_restaurant_data('italian')
print (italian)
"""
def restaurants_sort_print(restaurants):
separator = "*******=======*******"
print("\n\nThe following restaurants found are:\n")
for item in restaurants:
print (separator)
print("\nName: {0}\n"
"Price: {1}/5\n"
"Rating: {2}/5\n"
"Address: {3}\n".format(item[1],item[2],item[3],item[4]))
"""
##TEST##
pizza = get_restaurant_data('pizza')
print (restaurants_sort_print(pizza))
"""
#Printing the Welcome Message
print_welcome()
##user interaction
while True:
user_input = str(input("\nWhat type of food would you like to eat?\nType the beginning of that food type and press enter to see if it's here.\n")).lower()
#Search for user_input in food types data structure here
##CONDITIONS
##must be valid string input, string length
##
if user_input not in food_types_keys:
print("Sorry, there's no food type beginning with '{0}'.\nPlease try another letter.".format(user_input))
else: #user_input in food_types_keys
food_output = food_type_output(user_input)
##for condition when there is only one type for the char
if len(food_output) == 1:
print ("The food choice beginning with '{0}' is: {1}".format(user_input,food_output[0]))
yn_input = str(input("\nWould you like to see the list of restaurants? y/n \n"))
if yn_input == 'y':
data = food_output[0]
print_data = restaurant_data_hashmap.retrieve(data)
restaurants_sort_print(print_data)
else:
pass
##for when there is a need to search the list through
else:
print ("The food choice beginning with '{0}' are: {1}".format(user_input,food_output))
user_input2 = (str(input("Please enter the first letters of the food you want. \n\n")))
data = search_food_types(user_input2, food_output)
print (data)
print ("The food choice beginning with '{0}' is: {1}".format(user_input2,data))
yn_input = str(input("\nWould you like to see the list of restaurants? y/n \n"))
if yn_input == 'y':
print_data = restaurant_data_hashmap.retrieve(data)
restaurants_sort_print(print_data)
else:
pass
| UTF-8 | Python | false | false | 5,491 | py | 1 | script.py | 1 | 0.585139 | 0.577855 | 0 | 211 | 23.805687 | 158 |
Yunshan-Liu/gdplib | 3,298,534,895,144 | fd61aed0c403d641327db7e1b84d24388fb78937 | f5e8a25d5585fc7a08a713760485627926a49dd7 | /gdplib/mod_hens/modular_integer.py | b0b7308f743af0bdd471ee3e0179720e2d350a8c | [
"BSD-3-Clause"
]
| permissive | https://github.com/Yunshan-Liu/gdplib | a408921745c4f5797ab9ecfd5e371f6d4cdd3a95 | f453abcde1906018635c2245414b707b69fa110c | refs/heads/master | 2023-03-06T04:29:29.078550 | 2021-02-10T04:19:05 | 2021-02-10T04:19:05 | 298,096,123 | 0 | 0 | BSD-3-Clause | true | 2020-09-23T21:15:04 | 2020-09-23T21:15:03 | 2020-09-23T20:10:34 | 2020-09-23T20:10:32 | 393 | 0 | 0 | 0 | null | false | false | """Heat integration case study.
This is example 1 of the Yee & Grossmann, 1990 paper "Simultaneous optimization
models for heat integration--II".
DOI: 10.1016/0098-1354(90)85010-8
This is a modification to support the incorporation of standardized exchanger
modules using integer variables for module selection.
"""
from __future__ import division
from pyomo.environ import (Constraint, Integers, log, Var)
from pyomo.gdp import Disjunct, Disjunction
from gdplib.mod_hens import common
def build_single_module(cafaro_approx, num_stages):
m = build_model(cafaro_approx, num_stages)
# Require modular
for hot, cold, stg in m.valid_matches * m.stages:
disj = m.exchanger_exists[hot, cold, stg]
disj.modular.indicator_var.fix(1)
disj.conventional.deactivate()
# Must choose only one type of module
@m.Disjunct(m.module_sizes)
def module_type(disj, size):
"""Disjunct for selection of one module type."""
disj.no_other_module_types = Constraint(
expr=sum(m.num_modules[hot, cold, stage, area]
for hot, cold in m.valid_matches
for stage in m.stages
for area in m.module_sizes
if area != size) == 0)
m.select_one_module_type = Disjunction(
expr=[m.module_type[area] for area in m.module_sizes])
return m
def build_require_modular(cafaro_approx, num_stages):
m = build_model(cafaro_approx, num_stages)
# Require modular
for hot, cold, stg in m.valid_matches * m.stages:
m.exchanger_exists[hot, cold, stg].conventional.deactivate()
return m
def build_modular_option(cafaro_approx, num_stages):
return build_model(cafaro_approx, num_stages)
def build_model(use_cafaro_approximation, num_stages):
"""Build the model."""
m = common.build_model(use_cafaro_approximation, num_stages)
m.num_modules = Var(
m.valid_matches, m.stages, m.module_sizes,
doc="The number of modules of each size at each exchanger.",
domain=Integers, bounds=(0, 100), initialize=0)
# improve quality of bounds
for size in m.module_sizes:
for var in m.num_modules[:, :, :, size]:
var.setub(m.max_num_modules[size])
for hot, cold, stg in m.valid_matches * m.stages:
disj = m.exchanger_exists[hot, cold, stg]
disj.conventional = Disjunct()
if not use_cafaro_approximation:
disj.conventional.exchanger_area_cost = Constraint(
expr=m.exchanger_area_cost[stg, hot, cold] * 1E-3 >=
m.exchanger_area_cost_factor[hot, cold] * 1E-3 *
m.exchanger_area[stg, hot, cold] ** m.area_cost_exponent)
else:
disj.conventional.exchanger_area_cost = Constraint(
expr=m.exchanger_area_cost[stg, hot, cold] * 1E-3 >=
m.exchanger_area_cost_factor[hot, cold] * 1E-3 * m.cafaro_k
* log(m.cafaro_b * m.exchanger_area[stg, hot, cold] + 1)
)
m.BigM[disj.conventional.exchanger_area_cost] = 100
disj.conventional.exchanger_fixed_cost = Constraint(
expr=m.exchanger_fixed_cost[stg, hot, cold] ==
m.exchanger_fixed_unit_cost[hot, cold])
@disj.conventional.Constraint(m.module_sizes)
def no_modules(_, area):
return m.num_modules[hot, cold, stg, area] == 0
disj.modular = Disjunct()
disj.modular.exchanger_area_cost = Constraint(
expr=m.exchanger_area_cost[stg, hot, cold] * 1E-3 ==
sum(m.module_area_cost[hot, cold, area]
* m.num_modules[hot, cold, stg, area]
for area in m.module_sizes)
* 1E-3)
disj.modular.exchanger_fixed_cost = Constraint(
expr=m.exchanger_fixed_cost[stg, hot, cold] ==
m.module_fixed_unit_cost * sum(m.num_modules[hot, cold, stg, area]
for area in m.module_sizes))
disj.modular.exchanger_area = Constraint(
expr=m.exchanger_area[stg, hot, cold] ==
sum(area * m.num_modules[hot, cold, stg, area]
for area in m.module_sizes))
disj.modular_or_not = Disjunction(
expr=[disj.modular, disj.conventional])
# Area requirement
disj.exchanger_required_area = Constraint(
expr=m.exchanger_area[stg, hot, cold] * (
m.U[hot, cold] * m.LMTD[hot, cold, stg]) >=
m.heat_exchanged[hot, cold, stg])
m.BigM[disj.exchanger_required_area] = 5000
return m
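# Illustrative usage sketch (not part of the original module; the GDP transformation
# named below is an assumption about how the returned model might be solved):
#
#   m = build_modular_option(cafaro_approx=True, num_stages=4)
#   # the returned Pyomo GDP model can then be transformed (e.g. with 'gdp.bigm')
#   # and handed to a MI(N)LP solver.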
| UTF-8 | Python | false | false | 4,607 | py | 63 | modular_integer.py | 38 | 0.610593 | 0.598654 | 0 | 119 | 37.714286 | 79 |
RaulRC/genetic-neural-optimizer | 14,044,543,074,586 | ef41af978a0cf35a5d252a2b0ac29fc5a8a6f331 | ce983badab44f2fe35ab63ad2da0612611657b32 | /src/config_mnist_regularization.py | 3a022b3559bc5c8509730f7e5c21bbf8d703fd38 | [
"MIT"
]
| permissive | https://github.com/RaulRC/genetic-neural-optimizer | c4e3cf9c7bd08ee9394205fc245a8c492e976e28 | fa169cdc9b43c58470c3e7a7214185d56e61579a | refs/heads/master | 2023-02-16T17:25:43.938905 | 2019-09-19T17:11:43 | 2019-09-19T17:11:43 | 209,153,737 | 1 | 0 | MIT | false | 2023-02-02T06:39:38 | 2019-09-17T20:48:27 | 2021-04-30T09:07:15 | 2023-02-02T06:39:38 | 504 | 1 | 0 | 7 | Python | false | false | iterations = 10
generations_list = [200]
populations_list = [6]
elitism_list = [0.5]
mutables_list = [1]
EPOCHS = 3
MODE = None
| UTF-8 | Python | false | false | 128 | py | 26 | config_mnist_regularization.py | 18 | 0.679688 | 0.601563 | 0 | 7 | 17.285714 | 24 |
outlaw6/AdventOfCode2020 | 8,718,783,650,071 | 64c9e258dfd6464d08bb912467556168620e148d | 6cffbac77a72ce5fb84bc530d654b5bbec8e4b10 | /TreeTobboganProblem.py | 4545bb02868f638e6e6b74065d357298485efd8f | []
| no_license | https://github.com/outlaw6/AdventOfCode2020 | 8ca30b5f757c1d72eca3d76ba72ded32eb49787d | f5a3c75d0fcff10316255eaa18b1a940b91a92b4 | refs/heads/master | 2023-03-08T00:07:47.623887 | 2021-02-15T18:22:29 | 2021-02-15T18:22:29 | 322,637,242 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | lista1 = []
list_of_list = []
#print(lista1)\
f = open('input.txt.txt', 'r')
temp_list = []
c=0
for x in f:
temp_list.append(x.strip('\n'))
steps = 3
counter = 0
treeLine = 1
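# Walk down the slope one row at a time; the column index wraps with % 31 because the
# 31-character tree pattern repeats horizontally, and '#' marks a tree.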
while treeLine <= len(temp_list) - 1:
if temp_list[treeLine][steps % 31] == '#':
print("Correct: ", treeLine, steps, len(temp_list[treeLine]))
counter+=1
steps+=7
treeLine+=1
else:
steps+=3
treeLine+=1
#print(x)
print(counter, steps, steps%31, treeLine)
| UTF-8 | Python | false | false | 450 | py | 18 | TreeTobboganProblem.py | 17 | 0.62 | 0.584444 | 0 | 26 | 16.269231 | 63 |
jmzhao/foo.bar | 14,671,608,311,671 | 75a52e4dcf648fa2a7b3dd97f95bc7942726395b | 675337d68d87f121f65e5e322c2a5fe7c9b7d717 | /save_beta_rabbit/solution.py | dbb381fb1dcfae250113325a578d0802978e070a | []
| no_license | https://github.com/jmzhao/foo.bar | 48490b70d9ca5538f05f4e78c6fb11d78592b150 | f5d9f690096e3a3dfc79c1ef3d207991fabca553 | refs/heads/master | 2020-12-06T11:27:17.613193 | 2016-09-12T05:34:09 | 2016-09-12T05:34:09 | 66,224,006 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def answer(food, grid):
costs = all_possible_cost(grid)
try :
m = max(x for x in costs if x <= food)
except ValueError :
return -1
return food - m
def all_possible_cost(grid) :
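    # Dynamic programming over the grid: a[i][j] holds the set of all possible total
    # costs of right/down paths from the top-left cell (which itself costs 0) to (i, j).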
n = len(grid)
a = [[set() for _ in range(n)] for _ in range(n)]
for i in range(n) :
for j in range(n) :
if i == 0 :
if j == 0 :
a[i][j] = set([0])
else :
a[i][j] = set(x + grid[i][j] for x in a[i][j-1])
else :
if j == 0 :
a[i][j] = set(x + grid[i][j] for x in a[i-1][j])
else :
a[i][j] = set(x + grid[i][j] for x in a[i-1][j] | a[i][j-1])
return a[n-1][n-1]
| UTF-8 | Python | false | false | 760 | py | 31 | solution.py | 26 | 0.389474 | 0.375 | 0 | 24 | 30.666667 | 80 |
StevenLu2004/code-x-code-for-china-2019 | 12,421,045,430,874 | 6ec5095e102743e8cdb05ff06322a26c8cdd1863 | 7e1e61987e3e5b660a6605aea25a303265550314 | /practice/drawWindowsPane.py | 9fa09e93fd30fe54d6ee76e598e42b793160609e | []
| no_license | https://github.com/StevenLu2004/code-x-code-for-china-2019 | ad4dc08324e59485652995b027cd6d22aac85724 | aac7397e6a4d3e5e0731b81f5c1932873c579488 | refs/heads/master | 2020-06-19T18:43:35.335510 | 2019-09-06T18:44:19 | 2019-09-06T18:44:19 | 196,827,801 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3.5
import turtle
import random
screen = turtle.Screen()
t = turtle.Turtle()
screen.colormode(255)
screen.tracer(0)
t.speed(0)
t.hideturtle()
def f1(a, b, k):
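    # Linear interpolation between a and b: returns a when k == 1 and b when k == 0.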
return a * k + b * (1 - k)
def line(x1, y1, x2, y2):
t.penup()
t.goto(x1, y1)
t.pendown()
t.goto(x2, y2)
t.penup()
def singlePane(x, y, L, l, q, cnt, fRefresh = False):
q %= 4
if q == 0:
fx = fy = 1
elif q == 1:
fx = -1; fy = 1
elif q == 2:
fx = fy = -1
else:
fx = 1; fy = -1
line(x, y, x + fx * L, y)
line(x, y, x, y + fy * L)
for i in range(cnt):
line(x + fx * f1(L, l, i / (cnt - 1)), y, x + fx * f1(L, l, i / (cnt - 1)), y + fy * f1(L, l, i / (cnt - 1)))
line(x, y + fy * f1(L, l, i / (cnt - 1)), x + fx * f1(L, l, i / (cnt - 1)), y + fy * f1(L, l, i / (cnt - 1)))
if fRefresh:
screen.update()
def windowsPane(x, y, L, l, cnt, variation = 0, fRefresh = True):
v = [1 + (random.random() - .5) * variation for _ in range(4)]
t.color("green")
singlePane(x, y, L * v[0], l * v[0], 0, cnt)
t.color("red")
singlePane(x, y, L * v[1], l * v[1], 1, cnt)
t.color("blue")
singlePane(x, y, L * v[2], l * v[2], 2, cnt)
t.color("yellow")
singlePane(x, y, L * v[3], l * v[3], 3, cnt)
if fRefresh:
screen.update()
def main():
windowsPane(0, 0, 200, 100, 3, 0)
screen.onkey(turtle.bye, "Escape")
screen.listen()
return 0
if __name__ == "__main__":
main()
screen.mainloop()
| UTF-8 | Python | false | false | 1,390 | py | 28 | drawWindowsPane.py | 27 | 0.531655 | 0.484173 | 0 | 61 | 21.786885 | 111 |
mbroihier/tennisball | 377,957,149,865 | 76a8fe5f82a9866895f7505f65e0afedb5b1c55a | 0659564ac57fe2aae239199aea57db967e9e2fb6 | /tennisball.py | 680ef225291de0ef903ec57e074d9236c0b6feeb | [
"MIT"
]
| permissive | https://github.com/mbroihier/tennisball | 4455f866cf44a9d86a13f117820b79779b6d1b22 | e91efa7d60be056f2f6bf3461c6a8bb19043eac4 | refs/heads/master | 2020-07-12T11:15:03.706775 | 2020-03-13T21:49:47 | 2020-03-13T21:49:47 | 204,803,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Aug 19, 2019
@author: broihier
'''
import LED
import Ping
class TennisBall():
'''
Tennis Ball class
'''
def __init__(self):
self.ping = Ping.Ping()
self.led = LED.LED()
def run_tennisball(self):
'''
        run_tennisball - continuously pings and turns the LED on while the measured distance is below the starting average
'''
print("Starting IO")
average = self.ping.start()
while True:
try:
currentDistance = self.ping.pulse()
print("Delta: ", currentDistance)
if currentDistance < average:
self.led.on()
else:
self.led.off()
except KeyboardInterrupt:
print("Terminating via keyboard stop")
self.ping.stop()
self.led.stop()
break
except Exception as err:
print(err)
if __name__ == "__main__":
BALL = TennisBall()
BALL.run_tennisball()
| UTF-8 | Python | false | false | 1,015 | py | 8 | tennisball.py | 5 | 0.48867 | 0.482759 | 0 | 43 | 22.604651 | 76 |
TJJTJJTJJ/pytorch_CycleGAN_frame | 1,142,461,336,566 | 4ea08c7661fd3b85a7e21bc8ff5398cd69f9d674 | a5401a19c810f83188d706083b32c6733f252182 | /utils/util.py | b220d42460a5bb3e453e84bf616c3a78570d358d | []
| no_license | https://github.com/TJJTJJTJJ/pytorch_CycleGAN_frame | 556fd42c35461e12337a3a2645bf3a61d87e96c3 | 08e40983688ead674fcb4c00f4e29517b8c41ab8 | refs/heads/master | 2020-04-04T20:14:54.105230 | 2018-11-05T15:29:19 | 2018-11-05T15:29:19 | 156,239,442 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python3
# -*- coding:utf-8 -*-
# @Time : 18-10-30 11:41 PM
# Author : TJJ
import os

import numpy as np
import torch

__all__ = ['mkdirs', 'mkdir', 'tensor2im']
def mkdirs(paths):
"""
    Create the directories given by paths
    :param paths: a list of path strings or a single path str
:return:
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
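# A minimal usage sketch (illustrative only; the dummy tensor below is an assumption):
#   img = tensor2im(torch.randn(1, 3, 8, 8))  # -> uint8 numpy array of shape (8, 8, 3)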
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype) | UTF-8 | Python | false | false | 1,001 | py | 12 | util.py | 10 | 0.608784 | 0.57712 | 0 | 37 | 25.486486 | 74 |
Gracewx/MouseBrainHierarchy | 5,446,018,579,604 | 618bf63538e8b7b9061947afd1296ac8637c3112 | 82cfed2d1f933c52689e11a462f5b5e5670ebdb8 | /run_TCCT_module.py | 68e805e6222e91c86c9f102d1ea5c78e56ca596c | [
"BSD-2-Clause"
]
| permissive | https://github.com/Gracewx/MouseBrainHierarchy | 9db9fff39fe4643d152296ed30523a70d6f5d246 | 8e4e0dd271fab84a745d8101aef2f995eb07a2bc | refs/heads/master | 2023-03-18T18:47:28.558214 | 2019-10-10T22:28:41 | 2019-10-10T22:28:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import logging
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from IterativeMethod import iterativeTCCT
"""
Created on Mon Jun 24 16:04:07 2019
@author: Hannah Choi
"""
"""
This code finds intra- or inter-module hierarchies of cortical network,
based on the cortico-cortical (CC), thalamo-cortical (TC), and cortico-thalamic (CT) connectivity data.
"""
# In[]:
CreConf = 1 # 1 if using CC hierarhcy with Cre-confidence; 0 if not
input_dir = r'./Input/' # Directory with the file "CC_TC_CT_clusters.xlsx" & "clustermapping.xlsx"
output_dir = r'./Output/module/' # Directory to save the ouputs from the experimental data
# In[]: Define the module.
module = 'VisualMedial' #'VisualMedial' #'inter_predefined'
# In the paper, we used the following: 'VisualMedial' & 'inter_predefined'
# Possible modules: 'VisualMedial', 'Visual', 'Medial', 'Auditory', 'Somatomotor', 'PFC', 'Lateral', 'inter_predefined', 'inter'
# In[]: 9 clusters of cortico-cortico & thalamo-courtical source-line-target pairs
xls=pd.ExcelFile(input_dir+"CC_TC_CT_clusters.xlsx")
df=pd.read_excel(xls,'CC+TC clusters with MD avged')
df=df[(df.hemi == "ipsi")&(df.creline != "C57BL/6J / Emx1")&(df.target != "VISC")&(df.source != "SSp-un")&(df.target != "SSp-un")&(df.source != "VISC")]
list_module = df["Cortical Target Module"].unique()
clu_ffb=pd.read_excel(input_dir+"clustermapping.xlsx")
# In[]: If inter-module, change all the target & source area names to the module name
if (module == 'inter') or (module == 'inter_predefined'):
for i_module in range(0,len(list_module)):
df.loc[df["Cortical Target Module"] == list_module[i_module],'target']= list_module[i_module]
df.loc[df["Cortical Source Module"] == list_module[i_module],'source'] = list_module[i_module]
# In[]: Cortical and thalamic hierarchy from CC+TC iteration
if CreConf == 1:
h_CC_TC=pd.read_excel(output_dir+"TC_CCconf_iter_"+module+".xls")
elif CreConf == 0:
h_CC_TC=pd.read_excel(output_dir+"TC_CCnoconf_iter_"+module+".xls")
C_areas = h_CC_TC[h_CC_TC["CortexThalamus"]=="C"]["areas"].unique()
T_areas = h_CC_TC[h_CC_TC["CortexThalamus"]=="T"]["areas"].unique()
# In[]: 2 clusters (FF/FB) based on LDA of cortico-thalamic source-line-target pairs (dfCT)
dfCT = pd.read_excel(input_dir+"CT_sourcelayer_FFB.xls")
dfCT=dfCT[['source','target','FFB_LDA','Cortical Source Module']]
dfCT = dfCT.rename(columns={"FFB_LDA":"ffb"})
# In[]: For intra medial, select only the areas within the chosen module for cortico-thalamic connections (dfCT)
if (module != 'inter') and (module != 'inter_predefined'):
if module == 'VisualMedial':
dfCT.loc[((dfCT["Cortical Source Module"] == "Visual")|(dfCT["Cortical Source Module"] == "Medial")),"Cortical Source Module"]='VisualMedial'
dfCT = dfCT[(dfCT["Cortical Source Module"] == module)]
dfCT = dfCT[(dfCT["source"]!='RSPagl')&(dfCT["target"]!='RSPagl')&(dfCT["source"]!='RSPd')&(dfCT["target"]!='RSPd')
&(dfCT["source"]!='RSPv')&(dfCT["target"]!='RSPv')]
else:
dfCT = dfCT[(dfCT["Cortical Source Module"] == module)]
# In[]: For intra medial, select only the areas within the chosen module for thalamo-cortical source-line-target pairs (dfTC)
dfTC=pd.read_excel(output_dir+"inputexpanded_TC9_"+module+".xls")
dfTC = dfTC.rename(columns={"ffb_c":"ffb"})
if (module != 'inter') and (module != 'inter_predefined'):
if module == 'VisualMedial':
dfTC.loc[((dfTC["Cortical Target Module"] == "Visual")|(dfTC["Cortical Target Module"] == "Medial")),"Cortical Target Module"]='VisualMedial'
dfTC = dfTC[(dfTC["Cortical Target Module"] == module)]
dfTC = dfTC[(dfTC["source"]!='RSPagl')&(dfTC["target"]!='RSPagl')&(dfTC["source"]!='RSPd')&(dfTC["target"]!='RSPd')
&(dfTC["source"]!='RSPv')&(dfTC["target"]!='RSPv')]
else:
dfTC = dfTC[(dfTC["Cortical Target Module"] == module)]
# In[]: Merge dataframes of CT and TC connections
if (module == 'inter') or (module == 'inter_predefined'):
dfCT = dfCT[['Cortical Source Module','target','ffb']]
dfCT = dfCT.rename(columns={"Cortical Source Module": "source"})
else:
dfCT = dfCT[['source','target','ffb']]
dfTC = dfTC[['source','target','ffb']]
dfVT = pd.concat([dfTC, dfCT], ignore_index=True)
# In[]: Produce expanded data frame with FF/FB, hierarchy values as source & target for each pair of TC+CT connections
source_areas = dfVT["source"].unique()
target_areas = dfVT["target"].unique()
num_TC = np.shape(dfTC)[0]
num_CT = np.shape(dfCT)[0]
dfVT.to_excel(output_dir+'inputexpanded_TC9CT2_'+module+'.xls')
# In[ ]: Find initial hierarchy scores of thalamic areas (21)
n_T_areas=len(T_areas) # 21 thalamic regions
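# Each thalamic area's initial score is the average of its mean FF/FB value as a
# connection source (negated) and its mean FF/FB value as a connection target.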
hrs=range(0,n_T_areas)
hrt=range(0,n_T_areas)
hrc=range(0,n_T_areas)
for i in range(0,n_T_areas):
hrs[i]=-np.mean(dfVT[dfVT.source == T_areas[i]].ffb)
if len(dfVT[dfVT.target == T_areas[i]]) != 0:
hrt[i]=np.mean(dfVT[dfVT.target == T_areas[i]].ffb)
else:
hrt[i]=0
hrc[i]=(hrs[i]+hrt[i])/2
data=[T_areas,hrc]
data=np.transpose(data)
columns = ['areas','h']
dfiT = pd.DataFrame(data,columns=columns)
#dfiT.to_excel(output_dir+'initialhierarchy_TC9CT2_'+module+'.xls')
#dfiT.head()
# In[ ]: Iterate thalamic + cortical hierarhcy scores
n_iter = 20
if CreConf == 1:
dfiC = pd.read_excel(output_dir+"CC_conf_iter_"+module+".xls")
elif CreConf == 0:
dfiC = pd.read_excel(output_dir+"CC_noconf_iter_"+module+".xls")
dfiC['h'] = dfiC[n_iter]
dfVC = pd.read_excel(output_dir+"inputexpanded_CC9_"+module+".xls")
dfVC = dfVC[["source","target","ffb_nc","ffb_c"]]
if CreConf == 1:
dfVC["ffb"] = dfVC["ffb_c"]
elif CreConf == 0:
dfVC["ffb"] = dfVC["ffb_nc"]
dfiT = dfiT[["areas","h"]]
dfiC = dfiC[["areas","h"]]
dfVT = dfVT[["source","target","ffb"]]
dfVC = dfVC[["source","target","ffb"]]
hr_iter = iterativeTCCT(dfiC, dfVC, dfiT, dfVT, n_iter)
iteration=np.arange(0,n_iter+1,1)
n_area=np.shape(hr_iter)[0]
allareas = hr_iter["areas"].unique()
##########################################################
""" Figure of hierarchy score iterations """
fig,ax=plt.subplots()
for i in range(0,n_area):
y=np.squeeze(np.asarray(hr_iter[hr_iter.areas==allareas[i]].ix[:,1::1]))
ax.plot(iteration,y)
ax.set_xlim([0, n_iter])
ax.set_xticks(np.arange(0, n_iter, step=5))
ax.set_xlabel('iter')
ax.set_ylabel('hierarchy value')
ax.set_title('confidence adjusted')
plt.show()
#fig.savefig(output_dir+"TCCT_CCnoconf_iter_"+module+".pdf", bbox_inches='tight')
##########################################################
##########################################################
""" Figure showing correlation between hierarchy scores before & after iterations"""
hr_final = hr_iter[:][n_iter]
hr_initial = hr_iter[:][0]
f = plt.figure()
plt.plot(hr_initial,hr_final,'ro')
plt.xlabel('initial hierarchy')
plt.ylabel('final hierarchy')
plt.title('r='+str(np.corrcoef(hr_initial, hr_final)[0, 1]))
plt.show()
#f.savefig(output_dir+"TCCT_init_vs_fin_CCnoconf_"+module+".pdf", bbox_inches='tight')
##########################################################
##########################################################
'''Save hierarchy scores before and after iteration'''
for i_area in range(0,n_area):
if hr_iter['areas'][i_area] in list(dfiC['areas']):
hr_iter.loc[i_area,'CortexThalamus'] = 'C'
else:
hr_iter.loc[i_area,'CortexThalamus'] = 'T'
hr_iter = hr_iter[['areas','CortexThalamus', 0,n_iter] ]
hr_iter_save = hr_iter[(hr_iter.CortexThalamus=='C')]
if CreConf == 1:
hr_iter_save.to_excel(output_dir+'TCCT_CCconf_iter_'+module+'.xls')
elif CreConf == 0:
hr_iter_save.to_excel(output_dir+'TCCT_CCnoconf_iter_'+module+'.xls')
##########################################################
# In[]: Print out global hierarchy scores of CC+TC+CT connectivity data
dfi_TCCT = hr_iter[["CortexThalamus","areas",0,n_iter]]
dfi_TCCT = dfi_TCCT.rename(columns={0: "h0", n_iter:"h_iter"})
dfV_CC = dfVC[['source','target','ffb']]
dfV_TCCT = dfVT[["source","target","ffb"]]
dfi_cortex1 = dfi_TCCT[(dfi_TCCT.CortexThalamus == 'C')]
dfi_cortex1 = dfi_cortex1[['areas','h_iter']]
dfV_CC = dfV_CC.join(dfi_cortex1.set_index('areas'), on ='source')
dfV_CC=dfV_CC.rename(columns={"h_iter": "hs"})
dfV_CC = dfV_CC.join(dfi_cortex1.set_index('areas'), on ='target')
dfV_CC=dfV_CC.rename(columns={"h_iter": "ht"})
dfV_CC = dfV_CC.dropna()
hg_CC_1 = dfV_CC.ffb*(dfV_CC.ht- dfV_CC.hs)
dfi_thalamus1=dfi_TCCT[(dfi_TCCT.CortexThalamus == 'T')]
dfi_thalamus1 = dfi_thalamus1[['areas','h_iter']]
dfV_TCCT = dfV_TCCT.join(dfi_thalamus1.set_index('areas'), on ='source')
dfV_TCCT=dfV_TCCT.rename(columns={"h_iter": "hs"})
dfV_TCCT = dfV_TCCT.join(dfi_cortex1.set_index('areas'), on ='target')
dfV_TCCT=dfV_TCCT.rename(columns={"h_iter": "ht"})
dfV_TCCT = dfV_TCCT.dropna()
hg_TCCT_1 = dfV_TCCT.ffb*(dfV_TCCT.ht- dfV_TCCT.hs)
hg_cortex_TCCT_iter = np.mean(hg_CC_1)
hg_TCCT_iter = np.mean(hg_CC_1.append(hg_TCCT_1))
#########################################
dfV_CC = dfVC[['source','target','ffb']]
dfV_TCCT = dfVT[["source","target","ffb"]]
dfi_cortex1 = dfi_TCCT[(dfi_TCCT.CortexThalamus == 'C')]
dfi_cortex1 = dfi_cortex1[['areas','h0']]
dfV_CC = dfV_CC.join(dfi_cortex1.set_index('areas'), on ='source')
dfV_CC=dfV_CC.rename(columns={"h0": "hs"})
dfV_CC = dfV_CC.join(dfi_cortex1.set_index('areas'), on ='target')
dfV_CC=dfV_CC.rename(columns={"h0": "ht"})
dfV_CC = dfV_CC.dropna()
hg_CC_1 = dfV_CC.ffb*(dfV_CC.ht- dfV_CC.hs)
dfi_thalamus1=dfi_TCCT[(dfi_TCCT.CortexThalamus == 'T')]
dfi_thalamus1 = dfi_thalamus1[['areas','h0']]
dfV_TCCT = dfV_TCCT.join(dfi_thalamus1.set_index('areas'), on ='source')
dfV_TCCT=dfV_TCCT.rename(columns={"h0": "hs"})
dfV_TCCT = dfV_TCCT.join(dfi_cortex1.set_index('areas'), on ='target')
dfV_TCCT=dfV_TCCT.rename(columns={"h0": "ht"})
dfV_TCCT = dfV_TCCT.dropna()
hg_TCCT_1 = dfV_TCCT.ffb*(dfV_TCCT.ht- dfV_TCCT.hs)
hg_cortex_TCCT_init = np.mean(hg_CC_1)
hg_TCCT_init = np.mean(hg_CC_1.append(hg_TCCT_1))
print('hg of CC+TC+CT before iterate cortex & thalamus='+str(hg_TCCT_init))
print('hg of CC+TC+CT iterate cortex='+str(hg_cortex_TCCT_iter))
print('hg of CC+TC+CT iterate cortex & thalamus='+str(hg_TCCT_iter))
'''Save global hierarchy scores'''
newDF= pd.DataFrame([])
newDF=newDF.append(pd.DataFrame({'hg_TCCT_init':hg_TCCT_init, 'hg_cortex_TCCT_iter':hg_cortex_TCCT_iter,
'hg_TCCT_iter':hg_TCCT_iter},index=[0]))
newDF.to_excel(output_dir+'ghs_TCCT_'+module+'.xls')
| UTF-8 | Python | false | false | 10,942 | py | 13 | run_TCCT_module.py | 12 | 0.616158 | 0.60647 | 0 | 285 | 36.385965 | 152 |
RyanPoy/sweet_orm | 8,272,107,045,055 | a298952dd1b730f93682cff9b03ad1a94430f61d | ba6c148c63179fda44e366c7b0752a54fee5838a | /tests/integration/for_sqlite/test_relation_has_one_sqlite.py | 88720a5ee86d1772b12fa12328ffa403b4a8ed8f | []
| no_license | https://github.com/RyanPoy/sweet_orm | b706731cbfd97f1c4fb5a61132305b8b3785de04 | bd19c5218d02b706e5ebe92d0771c9ee8dade006 | refs/heads/master | 2023-05-07T01:02:02.964371 | 2021-05-08T03:35:14 | 2021-05-08T03:35:14 | 266,485,666 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding: utf8
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
import unittest
from tests.integration.for_sqlite.helper import User, Mobile, Car
class TestHasOneToSQLite(unittest.TestCase):
def setUp(self):
self.remove_record()
def tearDown(self):
self.remove_record()
def remove_record(self):
Car.delete_all()
User.delete_all()
def test_query(self):
u = User.create(name="Jon", age=31)
Car.create(name="Benz", user_id=u.id)
c = User.first().car
self.assertEqual(Car, type(c))
self.assertEqual('Benz', c.name)
self.assertEqual(u.id, c.user_id)
def test_query_with_include(self):
u = User.create(name="Jon", age=31)
Car.create(name="Benz", user_id=u.id)
c = User.include('car').first().car
self.assertEqual(Car, type(c))
self.assertEqual('Benz', c.name)
self.assertEqual(u.id, c.user_id)
def test_create(self):
u = User.create(name="Jon", age=31)
car_id = Car.create(name="Benz", user=u).id
c = Car.find(car_id)
self.assertEqual(u.id, c.user_id)
u = c.user
self.assertEqual("Jon", u.name)
self.assertEqual(31, u.age)
def test_save(self):
u = User.create(name="Jon", age=31)
car_id = Car(name="Benz", user=u).save().id
c = Car.find(car_id)
self.assertEqual(u.id, c.user_id)
u = c.user
self.assertEqual("Jon", u.name)
self.assertEqual(31, u.age)
def test_update(self):
u1 = User.create(name="Jon", age=31)
u2 = User.create(name="Lily", age=21)
c = Car(name="Benz", user=u1).save()
self.assertEqual(u1.id, c.user_id)
c.update(user=u2)
self.assertEqual(u2.id, c.user_id)
c = Car.where(name='Benz').first()
self.assertEqual(u2.id, c.user_id)
def test_delete_cascade(self):
user_id1 = User.create(name="Jon", age=31).id
Car.create(name="Benz", user_id=user_id1)
user_id2 = User.create(name="Jon", age=31).id
Car.create(name="Mazda", user_id=user_id2)
self.assertEqual(2, Car.count())
User.where(id=user_id1).delete()
self.assertEqual(1, Car.count())
User.find(user_id2).delete()
self.assertEqual(0, Car.count())
def test_delete_all_cascade(self):
user_id1 = User.create(name="Jon", age=31).id
Car.create(name="Benz", user_id=user_id1)
user_id2 = User.create(name="Jon", age=31).id
Car.create(name="Mazda", user_id=user_id2)
self.assertEqual(2, Car.count())
User.delete_all()
self.assertEqual(0, Car.count())
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 2,797 | py | 65 | test_relation_has_one_sqlite.py | 59 | 0.574187 | 0.557383 | 0 | 99 | 27.252525 | 74 |
Peteliuk/practicebot | 3,616,362,474,339 | f673b8b921f3edb07a23c83964c3715f07286000 | b23c962bb77ddd424e0e6723b0ebb6adc3a51d46 | /bot/constants/variables.py | 859478eaa3837aaf43dc8146699779e5d3e3d871 | []
| no_license | https://github.com/Peteliuk/practicebot | 82bbd0b17fdae98608c8b56b9bc7f19ff2c76ce3 | d588516465499b1b36b253ab8b717cbfe6132050 | refs/heads/master | 2020-12-27T15:57:04.220563 | 2020-10-02T14:08:47 | 2020-10-02T14:08:47 | 237,960,372 | 0 | 0 | null | false | 2020-10-02T14:08:49 | 2020-02-03T12:31:32 | 2020-02-05T09:56:39 | 2020-10-02T14:08:48 | 96 | 0 | 0 | 1 | Python | false | false | STATUS_VARIANTS = [
(1, 'created'),
(2, 'rejected'),
(3, 'accepted'),
(4, 'completed'),
]
| UTF-8 | Python | false | false | 126 | py | 37 | variables.py | 35 | 0.396825 | 0.365079 | 0 | 6 | 20 | 25 |
nullpass/grocerylist | 1,614,907,734,196 | 78ee524f76d2920764f3cf1ddfe78e57a9cd5c0e | afd11578be01987c999597685ee8ad81ccf6be60 | /items/forms.py | 549b0f1dfbf7276b43a4f28a3c446e5c24970c31 | []
| no_license | https://github.com/nullpass/grocerylist | daa284cfc9280012bbef1bf6a7468f79c1181627 | b254b48cb85f43b5863acbed384d9a774af2e944 | refs/heads/master | 2021-01-21T08:01:40.043083 | 2015-08-22T17:23:01 | 2015-08-22T17:23:01 | 21,701,763 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # items/forms.py
from django.forms import ModelForm, CheckboxSelectMultiple, RadioSelect
from . import models
class EmbedItemForm(ModelForm):
""" The item-add form that is embedded in StoreDetailView.html """
class Meta:
fields = (
'name',
'price',
'from_isle',
)
model = models.Item
def __init__(self, *args, **kwargs):
super(EmbedItemForm, self).__init__(*args, **kwargs)
self.fields['from_isle'].empty_label = None
self.fields['name'].widget.attrs['size'] = 20
self.fields['from_isle'].widget.attrs['style'] = 'width: 150px;'
class ItemForm(ModelForm):
""" A standard item create form """
class Meta:
fields = (
'name',
'price',
'from_isle',
)
model = models.Item
widgets = {'from_isle': RadioSelect() }
def __init__(self, *args, **kwargs):
super(ItemForm, self).__init__(*args, **kwargs)
self.fields['from_isle'].empty_label = None
| UTF-8 | Python | false | false | 1,062 | py | 50 | forms.py | 38 | 0.546139 | 0.541431 | 0 | 40 | 25.55 | 72 |
atom-chen/myCode | 6,141,803,280,441 | bac04b55281f103da60ea3324ba0bd3bcb13f406 | 6a0dec28e7d606075c86c3f43b527cd04112347b | /tool/sdk_tools/scripts/tool_splash.py | 74e1da8a1d6ab6fd3d09a3c12d92f2406db565ab | []
| no_license | https://github.com/atom-chen/myCode | 8b8af86f2c98255174b26fba3c819aba973f33e7 | ebecd0af139ffb4caf0d3c7f603b27c461f0839f | refs/heads/master | 2020-12-04T13:31:54.251342 | 2017-02-28T02:20:43 | 2017-02-28T02:20:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sys
import file_utils
import config_utils
import os
import os.path
import time
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import ElementTree
import xml.dom.minidom
import subprocess
import shutil
def copyAndRenameImg(demoName, demoDir):
splashDir = file_utils.curDir + '/_splashes/' + demoName
demoResDir = demoDir + '/res/drawable-hdpi'
    #find the splash images, rename them, and copy them to the destination dir
for i in range(1, 6):
splashFullPath = demoResDir + '/' + 'game_splash_' + str(i) + '.png'
if os.path.exists(splashFullPath):
os.remove(splashFullPath)
img_path = splashDir + '/' + str(i) + '.png'
if os.path.exists(img_path):
if not os.path.exists(demoResDir):
os.makedirs(demoResDir)
shutil.copy(img_path, splashFullPath)
file_utils.printF("copy splash : %s", demoName + ': game_splash_' + str(i))
def main():
file_utils.printF("-------------------------copy splashes start------------------------")
configFile = file_utils.getFullPath("channels_config.xml")
dom = xml.dom.minidom.parse(configFile)
root = dom.documentElement
channellist = root.getElementsByTagName('channel')
for channel in channellist:
sdk_name = ""
params = channel.getElementsByTagName("param")
for param in params:
if "sdk_name" == param.getAttribute("name"):
sdk_name = param.getAttribute("value")
break
demoDir = os.path.dirname(file_utils.curDir) + "/" + channel.getAttribute('path')
if os.path.exists( demoDir ):
copyAndRenameImg(sdk_name, demoDir)
#else:
# file_utils.printF("can not find dir: %s", demoDir)
file_utils.printF("-------------------------copy splashes over------------------------\r\n\r\n")
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 2,087 | py | 223 | tool_splash.py | 108 | 0.579507 | 0.578057 | 0 | 61 | 32.934426 | 101 |
ponkbrown/flaskBlog | 6,253,472,394,659 | f985d81d854a7e5b64247224158fba8695718308 | a9a13a0b2636c6f7ba97352fc7eaf2e043a734a4 | /app.py | ba291d3eea77b9b5a9271939dda3a7ce6272832a | []
| no_license | https://github.com/ponkbrown/flaskBlog | 8bc351c19f26d19d40e9cc6c90976f8e2c08b897 | 80bd82cd88421bf9db2852f05b8deba3b66347a4 | refs/heads/master | 2016-08-09T13:11:27.832163 | 2016-03-18T22:08:03 | 2016-03-18T22:08:03 | 54,070,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python3
from flask import Flask, render_template
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
app = Flask(__name__)
manager = Manager(app)
bootstrap = Bootstrap(app)
@app.route('/')
def main():
return render_template('index.html')
@app.route('/showSignin')
def showSignin():
return render_template('signin.html')
@app.route('/showSignup')
def showSignUp():
return render_template('signup.html')
if __name__ == "__main__":
manager.run()
| UTF-8 | Python | false | false | 507 | py | 5 | app.py | 2 | 0.696252 | 0.69428 | 0 | 23 | 21.043478 | 41 |
hanyiwen/imooc-django-ref | 15,264,313,777,357 | 5ddf4f33639e1cc98f2cdc02cb5180ede01da99f | 092d3ba8db5cebafda6a2699ec746439ceea7faa | /apps/courses/__init__.py | c4597a391ed4008f32c095bf87b8e747e7b1bef9 | []
| no_license | https://github.com/hanyiwen/imooc-django-ref | b12eb8552da144e3186383c6d4d106ed7559cf84 | 2f717fda56d276671f4ab94d2a46f0b33ab0810f | refs/heads/master | 2020-04-04T17:14:41.564562 | 2018-11-15T16:57:34 | 2018-11-15T16:57:34 | 156,113,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 新建app时并没有引用apps的配置 因此需要
default_app_config = "courses.apps.CoursesConfig"
| UTF-8 | Python | false | false | 106 | py | 5 | __init__.py | 3 | 0.815789 | 0.815789 | 0 | 2 | 37 | 49 |
ritazchen/AAIA | 11,630,771,467,456 | a340de99528b061ffaa3d6e47e48adf85b04348a | 85cf4fedfcf301467e62d46f9926d557be7c28b1 | /AA/Programa.py | f8eeae331688b742846ccec2e8bb75266f228741 | []
| no_license | https://github.com/ritazchen/AAIA | 96439b8f6a2e443862927f024b5769ee8e00bf0e | 92fdb1c251440c0acd21289773b59d2574a272af | refs/heads/master | 2020-09-14T05:13:16.844399 | 2019-12-06T11:26:34 | 2019-12-06T11:26:34 | 223,029,250 | 0 | 0 | null | true | 2019-11-20T21:17:47 | 2019-11-20T21:17:46 | 2019-11-20T19:49:24 | 2019-11-20T19:49:22 | 204 | 0 | 0 | 0 | null | false | false | #import pygame
import sys
#from AA.configuracoes import *
from AA.Pacman import *
from AA.Fantasma import *
pygame.init()
vec = pygame.math.Vector2
class Programa:
def __init__(self):
        self.janela = pygame.display.set_mode((LARGURA, ALTURA)) # Creates the window (dimensions)
        self.clock = pygame.time.Clock() # Clock used to regulate the FPS
        self.executando = True
        self.state = 'tela de inicio' # When the program starts, the title screen is shown #28
        pygame.display.set_caption("Pac Man by: Rita Chen & Vitor Queiroz") # Program title
        self.icon = pygame.image.load("imagens/pacman_32px.png")
        pygame.display.set_icon(self.icon) # Sets the pacman image as the window icon (title bar)
self.largura_quadradoGrid = LARGURA_LAB // COLS
self.altura_quadradoGrid = ALTURA_LAB // ROWS
self.paredes = []
self.moedas = []
self.fantasmas = []
self.jogador_posicao = None
self.fantasma_posicao = []
self.recorde = 0
self.pt = 0
self.load()
self.jogador = Pacman(self, vec(self.jogador_posicao))
self.cria_inimigos()
self.passei = 0
def run(self):
        while self.executando: #while the program is running...
            if self.state == 'tela de inicio': #if we are on the title screen (before playing)
                self.telaInicio_eventos()
                self.telaInicio_atualiza()
                self.telaInicio_desenha()
            elif self.state == 'jogando': #if we are on the game screen
                self.jogo_eventos()
                self.jogo_atualiza()
                self.jogo_desenha()
            elif self.state == 'game over': #if we are on the game over screen
self.gameover_eventos()
self.gameover_atualiza()
self.gameover_desenha()
else:
self.executando = False
self.clock.tick(FPS)
        pygame.quit() # closes the program
sys.exit()
    def escreve_texto(self, texto, janela, posicao, tamanhoFonte, cor, nomeFonte, centralizado=False): # Draws text on the screen
fonte = pygame.font.SysFont(nomeFonte, tamanhoFonte)
text = fonte.render(texto, False, cor)
text_size = text.get_size()
        #To center the text on the screen
if centralizado:
posicao[0] = posicao[0] - text_size[0]//2
posicao[1] = posicao[1] - text_size[1]//2
janela.blit(text, posicao)
def load(self):
self.inicio = pygame.image.load('imagens/tela_inicio.png')
self.aperta_jogar = pygame.image.load('imagens/tela_inicio_aperta.png')
self.background = pygame.image.load('imagens/labirinto_fechado.png')
self.perdeu = pygame.image.load('imagens/tela_perdeu.png')
self.aperta_jogar_dnv = pygame.image.load('imagens/tela_perdeu_aperta.png')
        #self.background = pygame.transform.scale(self.background, (LARGURA_LAB, ALTURA_LAB)) #scales the background to fit the window
        #reads the existing level objects from file
with open("coisas.txt", 'r') as arquivo:
for indice_y, linha in enumerate(arquivo):
for indice_x, objeto in enumerate(linha):
                    if objeto == 'P':
                        self.paredes.append(vec(indice_x, indice_y)) #stores the coordinates of each wall in the paredes list
                    elif objeto == 'M':
                        self.moedas.append(vec(indice_x, indice_y)) #stores the coordinates of each coin in the moedas list
                    elif objeto == 'J':
                        self.jogador_posicao = [indice_x, indice_y] #stores the player's starting coordinates
                    elif objeto in ["1", "2", "3", "4"]:
                        self.fantasma_posicao.append(vec(indice_x, indice_y)) #stores the starting coordinates of the enemies
arquivo.close()
with open("recorde.txt", 'r') as arquivo:
self.recorde = int(arquivo.read())
arquivo.close()
def cria_inimigos(self):
        #creates one enemy at each starting position
for indice_x, posicao in enumerate(self.fantasma_posicao):
self.fantasmas.append(Fantasma(self, vec(posicao), indice_x))
    def desenha_grid(self): #position grid marking where the player can walk, plus walls and coins
for x in range(LARGURA//self.largura_quadradoGrid):
pygame.draw.line(self.janela, CINZA, (x*self.largura_quadradoGrid, 0), (x*self.largura_quadradoGrid, ALTURA))
for x in range(ALTURA//self.altura_quadradoGrid):
pygame.draw.line(self.janela, CINZA, (0, x*self.altura_quadradoGrid), (LARGURA, x*self.altura_quadradoGrid))
#for parede in self.paredes:
#pygame.draw.rect(self.background, AQUAMARINE, (parede.x*self.largura_quadradoGrid, parede.y*self.altura_quadradoGrid,
#self.largura_quadradoGrid, self.altura_quadradoGrid))
#for moeda in self.moedas:
#pygame.draw.rect(self.background, LARANJA, (moeda.x*self.largura_quadradoGrid, moeda.y*self.altura_quadradoGrid,
#self.largura_quadradoGrid, self.altura_quadradoGrid))
def reset(self):
if int(self.recorde) < self.jogador.pontuacao:
with open("recorde.txt", 'w') as arquivo:
arquivo.write(str(self.jogador.pontuacao))
arquivo.close()
with open("recorde.txt", 'r') as arquivo:
self.recorde = int(arquivo.read())
arquivo.close()
self.jogador.vidas = 3
self.jogador.pontuacao = 0
self.pt = 0
self.jogador.grid_pos = vec(self.jogador.starting_pos)
self.jogador.pix_pos = self.jogador.get_pix_pos()
self.jogador.direcao *= 0
for fantasma in self.fantasmas:
fantasma.grid_pos = vec(fantasma.starting_pos)
fantasma.pix_pos = fantasma.get_pix_pos()
fantasma.direcao *= 0
self.moedas = []
with open("coisas.txt", 'r') as arquivo:
for indice_y, linha in enumerate(arquivo):
for indice_x, objeto in enumerate(linha):
if objeto == 'M':
self.moedas.append(vec(indice_x, indice_y))
arquivo.close()
self.state = "jogando"
def telaInicio_eventos(self):
for evento in pygame.event.get():
            if evento.type == pygame.QUIT: # If the X (close) button is pressed, quit the program
self.executando = False
else:
#print(pygame.mouse.get_pos())
if pygame.mouse.get_pos()[0] >= 160 and pygame.mouse.get_pos()[0] <= 343:
                    if pygame.mouse.get_pos()[1] >= 328 and pygame.mouse.get_pos()[1] <= 442: #fix this
self.passei = 1
else:
self.passei = 0
if evento.type == pygame.MOUSEBUTTONDOWN and self.passei == 1:
self.state = 'jogando'
def telaInicio_atualiza(self):
pass
    def telaInicio_desenha(self): #What is shown on the title screen
self.janela.fill(PRETO)
if self.passei == 0:
self.janela.blit(self.inicio, (0, 0))
else:
self.janela.blit(self.aperta_jogar, (0, 0))
pygame.display.update()
def jogo_eventos(self):
for evento in pygame.event.get():
up = 0
down = 0
left = 0
right = 0
            if evento.type == pygame.QUIT: # If the X (close) button is pressed, quit the program
self.executando = False
if evento.type == pygame.KEYDOWN:
                for parede in self.paredes: #if the next move would hit a wall, do not allow the movement
if [int(self.jogador.get_grid_posX()), int(self.jogador.get_grid_posY() - 1)] != [int(parede[0]), int(parede[1])]:
up += 1
if up == len(self.paredes) and (evento.key == pygame.K_UP or evento.key == pygame.K_w):
self.jogador.move(vec(0,-1))
self.jogador.angulo = 90
if [int(self.jogador.get_grid_posX()), int(self.jogador.get_grid_posY() + 1)] != [int(parede[0]), int(parede[1])]:
down += 1
if down == len(self.paredes) and (evento.key == pygame.K_DOWN or evento.key == pygame.K_s):
self.jogador.angulo = 270
self.jogador.move(vec(0, 1))
if [int(self.jogador.get_grid_posX() - 1), int(self.jogador.get_grid_posY())] != [int(parede[0]), int(parede[1])]:
left += 1
if left == len(self.paredes) and (evento.key == pygame.K_LEFT or evento.key == pygame.K_a):
self.jogador.angulo = 180
self.jogador.move(vec(-1, 0))
if [int(self.jogador.get_grid_posX() + 1), int(self.jogador.get_grid_posY())] != [int(parede[0]), int(parede[1])]:
right += 1
if right == len(self.paredes) and (evento.key == pygame.K_RIGHT or evento.key == pygame.K_d):
self.jogador.angulo = 0
self.jogador.move(vec(1, 0))
def jogo_atualiza(self):
self.pt = self.jogador.get_pontuacao()
self.jogador.atualiza()
for fantasma in self.fantasmas:
fantasma.atualiza()
for fantasma in self.fantasmas:
if fantasma.grid_pos == self.jogador.grid_pos:
self.perde_vida()
def jogo_desenha(self):
self.janela.fill(PRETO)
self.janela.blit(self.background, (ESPACOS_JOGO//2, ESPACOS_JOGO//2))
self.desenha_moedas()
#self.desenha_grid()
self.escreve_texto('SCORE: {}'.format(self.jogador.pontuacao), self.janela, [10,2], TAMANHO_FONTEJOGO, BRANCO, FONTE, centralizado=False)
self.escreve_texto('HIGH SCORE: {}'.format(self.recorde), self.janela, [LARGURA-160, 2], TAMANHO_FONTEJOGO, BRANCO, FONTE, centralizado=False)
self.jogador.desenha()
for fantasma in self.fantasmas:
fantasma.desenha()
pygame.display.update()
def perde_vida(self):
self.jogador.vidas -= 1
if self.jogador.vidas == 0:
self.state = "game over"
else:
self.jogador.grid_pos = vec(self.jogador.starting_pos)
self.jogador.pix_pos = self.jogador.get_pix_pos()
self.jogador.direcao *= 0
for fantasma in self.fantasmas:
fantasma.grid_pos = vec(fantasma.starting_pos)
fantasma.pix_pos = fantasma.get_pix_pos()
fantasma.direcao *= 0
#fantasma.pix_pos.x += 11
#fantasma.pix_pos.y += 10
def desenha_moedas(self):
for moeda in self.moedas:
pygame.draw.circle(self.janela, AMARELO, (int(ESPACOS_JOGO//2 + self.largura_quadradoGrid//2 + moeda.x*self.largura_quadradoGrid),
int(ESPACOS_JOGO//2 + self.altura_quadradoGrid//2 + moeda.y*self.altura_quadradoGrid)), 4)
def gameover_eventos(self):
for evento in pygame.event.get():
            if evento.type == pygame.QUIT: # If the X (close) button is pressed, quit the program
self.executando = False
else:
if pygame.mouse.get_pos()[0] >= 107 and pygame.mouse.get_pos()[0] <= 494:
                    if pygame.mouse.get_pos()[1] >= 365 and pygame.mouse.get_pos()[1] <= 575: #fix this
self.passei = 1
else:
self.passei = 0
if evento.type == pygame.MOUSEBUTTONDOWN and self.passei == 1:
self.reset()
def gameover_atualiza(self):
pass
    def gameover_desenha(self): #What is shown on the game over screen
self.janela.fill(PRETO)
if self.passei == 0:
self.janela.blit(self.perdeu, (0, 0))
else:
self.janela.blit(self.aperta_jogar_dnv, (0, 0))
pygame.display.update()
def get_pt(self):
return self.pt | UTF-8 | Python | false | false | 12,555 | py | 7 | Programa.py | 6 | 0.551873 | 0.541195 | 0 | 263 | 45.726236 | 150 |
DanRuckz/pacman | 11,811,160,076,912 | 99b9003e98fa6c5a449037c7fa6915ec85ae3aba | e0017d5824f124115266944e94e3fce7303e745c | /LoadFile.py | 81c09c37243ef468ae6f855f5384dc0dda040e30 | []
| no_license | https://github.com/DanRuckz/pacman | ffb120a78f7378cbca4f84e51a3be5963e1be636 | 7973cb1004c9e218f22726f42b7729f26887fbe0 | refs/heads/main | 2023-07-06T03:50:59.899213 | 2021-07-26T20:15:15 | 2021-07-26T20:15:15 | 342,968,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
map = pygame.image.load("spritesheet/30x30-tiles.png")
#Pacman = pygame.image.load("pacman.png")
#ghost = pygame.image.load("assets_old/ghost.png")
spritesheet = pygame.image.load("spritesheet/pacman.png")
| UTF-8 | Python | false | false | 223 | py | 17 | LoadFile.py | 9 | 0.748879 | 0.730942 | 0 | 7 | 30.714286 | 57 |
alvinkwekel/gpio-playground | 867,583,434,975 | 2a655a28370ea83a9b62091b572f8d9b50b12621 | 52dc7b0e126547e210c343a9aa1571a49878e4c7 | /led_board.py | 19c7d74a1901ae7aa980377b6023354c0d47ee02 | []
| no_license | https://github.com/alvinkwekel/gpio-playground | 6f3516eef4eb85a592cab3633af1f9b06f68978b | f1c2d4c4da59fff848f81c998427fdf676135ce7 | refs/heads/master | 2020-05-29T21:54:49.697919 | 2019-05-30T10:53:44 | 2019-05-30T10:53:44 | 189,397,012 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gpiozero import LEDBoard
from time import sleep
leds = LEDBoard(26, 19, 13, 6, 5)
while True:
leds.value = (1, 0, 0, 0, 0)
sleep(.1)
leds.value = (0, 1, 0, 0, 0)
sleep(.1)
leds.value = (0, 0, 1, 0, 0)
sleep(.1)
leds.value = (0, 0, 0, 1, 0)
sleep(.1)
leds.value = (0, 0, 0, 0, 1)
sleep(.1)
leds.value = (0, 0, 0, 0, 1)
sleep(.1)
leds.value = (0, 0, 0, 1, 0)
sleep(.1)
leds.value = (0, 0, 1, 0, 0)
sleep(.1)
leds.value = (0, 1, 0, 0, 0)
sleep(.1)
leds.value = (1, 0, 0, 0, 0)
sleep(.1)
| UTF-8 | Python | false | false | 577 | py | 3 | led_board.py | 3 | 0.47487 | 0.357019 | 0 | 28 | 19.607143 | 33 |
balajitummala/sagemaker-python-sdk | 10,393,820,901,781 | c9aa31f48f46387221cdb945097b2170cbb1fd0c | 417aa7cefaa3d14a23f9484b1cb2f41105bfbc21 | /src/sagemaker/clarify.py | dba978a209eca212863518f17505ee04a5497148 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/balajitummala/sagemaker-python-sdk | 4816244918cb27f7beaaf722c4be0ed817c1cea3 | 2594ffb3eaefaf55936b71ea0c38442135223602 | refs/heads/master | 2023-08-18T16:04:57.091104 | 2021-10-04T16:45:55 | 2021-10-04T16:45:55 | 256,359,718 | 0 | 0 | Apache-2.0 | true | 2021-10-05T22:07:10 | 2020-04-17T00:18:26 | 2020-05-04T22:33:36 | 2020-08-06T20:49:38 | 59,275 | 0 | 0 | 1 | Python | false | false | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module configures the SageMaker Clarify bias and model explainability processor job."""
from __future__ import print_function, absolute_import
import copy
from abc import ABC, abstractmethod
import json
import os
import tempfile
import re
from sagemaker.processing import ProcessingInput, ProcessingOutput, Processor
from sagemaker import image_uris, s3, utils
class DataConfig:
"""Config object related to configurations of the input and output dataset."""
def __init__(
self,
s3_data_input_path,
s3_output_path,
label=None,
headers=None,
features=None,
dataset_type="text/csv",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
):
"""Initializes a configuration of both input and output datasets.
Args:
s3_data_input_path (str): Dataset S3 prefix/object URI.
s3_output_path (str): S3 prefix to store the output.
label (str): Target attribute of the model required by bias metrics (optional for SHAP)
Specified as column name or index for CSV dataset, or as JSONPath for JSONLines.
headers (list[str]): A list of column names in the input dataset.
features (str): JSONPath for locating the feature columns for bias metrics if the
dataset format is JSONLines.
dataset_type (str): Format of the dataset. Valid values are "text/csv" for CSV,
"application/jsonlines" for JSONLines, and "application/x-parquet" for Parquet.
s3_data_distribution_type (str): Valid options are "FullyReplicated" or
"ShardedByS3Key".
s3_compression_type (str): Valid options are "None" or "Gzip".
"""
if dataset_type not in ["text/csv", "application/jsonlines", "application/x-parquet"]:
raise ValueError(
f"Invalid dataset_type '{dataset_type}'."
f" Please check the API documentation for the supported dataset types."
)
self.s3_data_input_path = s3_data_input_path
self.s3_output_path = s3_output_path
self.s3_data_distribution_type = s3_data_distribution_type
self.s3_compression_type = s3_compression_type
self.label = label
self.headers = headers
self.features = features
self.analysis_config = {
"dataset_type": dataset_type,
}
_set(features, "features", self.analysis_config)
_set(headers, "headers", self.analysis_config)
_set(label, "label", self.analysis_config)
def get_config(self):
"""Returns part of an analysis config dictionary."""
return copy.deepcopy(self.analysis_config)
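# Usage sketch (hypothetical bucket, prefix, and column names -- not part of the original module):
#   data_config = DataConfig(
#       s3_data_input_path="s3://my-bucket/clarify/train.csv",
#       s3_output_path="s3://my-bucket/clarify/output",
#       label="target",
#       headers=["target", "age", "income", "gender"],
#       dataset_type="text/csv",
#   )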
class BiasConfig:
"""Config object related to bias configurations of the input dataset."""
def __init__(
self,
label_values_or_threshold,
facet_name,
facet_values_or_threshold=None,
group_name=None,
):
"""Initializes a configuration of the sensitive groups in the dataset.
Args:
label_values_or_threshold (Any): List of label values or threshold to indicate positive
outcome used for bias metrics.
facet_name (str or [str]): String or List of strings of sensitive attribute(s) in the
input data for which we like to compare metrics.
facet_values_or_threshold (list): Optional list of values to form a sensitive group or
threshold for a numeric facet column that defines the lower bound of a sensitive
group. Defaults to considering each possible value as sensitive group and
computing metrics vs all the other examples.
If facet_name is a list, this needs to be None or a List consisting of lists or None
with the same length as facet_name list.
group_name (str): Optional column name or index to indicate a group column to be used
for the bias metric 'Conditional Demographic Disparity in Labels - CDDL' or
'Conditional Demographic Disparity in Predicted Labels - CDDPL'.
"""
if isinstance(facet_name, str):
facet = {"name_or_index": facet_name}
_set(facet_values_or_threshold, "value_or_threshold", facet)
facet_list = [facet]
elif facet_values_or_threshold is None or len(facet_name) == len(facet_values_or_threshold):
facet_list = []
for i, single_facet_name in enumerate(facet_name):
facet = {"name_or_index": single_facet_name}
if facet_values_or_threshold is not None:
_set(facet_values_or_threshold[i], "value_or_threshold", facet)
facet_list.append(facet)
else:
raise ValueError("Wrong combination of argument values passed")
self.analysis_config = {
"label_values_or_threshold": label_values_or_threshold,
"facet": facet_list,
}
_set(group_name, "group_variable", self.analysis_config)
def get_config(self):
"""Returns part of an analysis config dictionary."""
return copy.deepcopy(self.analysis_config)
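# Usage sketch (hypothetical facet and label values): treat rows with label 1 as the positive
# outcome and rows with gender == 0 as the sensitive group.
#   bias_config = BiasConfig(
#       label_values_or_threshold=[1],
#       facet_name="gender",
#       facet_values_or_threshold=[0],
#   )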
class ModelConfig:
"""Config object related to a model and its endpoint to be created."""
def __init__(
self,
model_name,
instance_count,
instance_type,
accept_type=None,
content_type=None,
content_template=None,
custom_attributes=None,
accelerator_type=None,
endpoint_name_prefix=None,
):
r"""Initializes a configuration of a model and the endpoint to be created for it.
Args:
model_name (str): Model name (as created by 'CreateModel').
instance_count (int): The number of instances of a new endpoint for model inference.
instance_type (str): The type of EC2 instance to use for model inference,
for example, 'ml.c5.xlarge'.
accept_type (str): The model output format to be used for getting inferences with the
shadow endpoint. Valid values are "text/csv" for CSV and "application/jsonlines".
Default is the same as content_type.
content_type (str): The model input format to be used for getting inferences with the
shadow endpoint. Valid values are "text/csv" for CSV and "application/jsonlines".
Default is the same as dataset format.
content_template (str): A template string to be used to construct the model input from
dataset instances. It is only used when "model_content_type" is
"application/jsonlines". The template should have one and only one placeholder
                $features, which will be replaced by a features list to form the model inference
input.
custom_attributes (str): Provides additional information about a request for an
inference submitted to a model hosted at an Amazon SageMaker endpoint. The
information is an opaque value that is forwarded verbatim. You could use this
value, for example, to provide an ID that you can use to track a request or to
provide other metadata that a service endpoint was programmed to process. The value
must consist of no more than 1024 visible US-ASCII characters as specified in
                Section 3.2.6. Field Value Components (
https://tools.ietf.org/html/rfc7230#section-3.2.6) of the Hypertext Transfer
Protocol (HTTP/1.1).
accelerator_type (str): The Elastic Inference accelerator type to deploy to the model
endpoint instance for making inferences to the model, see
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html.
endpoint_name_prefix (str): The endpoint name prefix of a new endpoint. Must follow
                pattern "^[a-zA-Z0-9](-*[a-zA-Z0-9])".
"""
self.predictor_config = {
"model_name": model_name,
"instance_type": instance_type,
"initial_instance_count": instance_count,
}
if endpoint_name_prefix is not None:
if re.search("^[a-zA-Z0-9](-*[a-zA-Z0-9])", endpoint_name_prefix) is None:
raise ValueError(
"Invalid endpoint_name_prefix."
" Please follow pattern ^[a-zA-Z0-9](-*[a-zA-Z0-9])."
)
self.predictor_config["endpoint_name_prefix"] = endpoint_name_prefix
if accept_type is not None:
if accept_type not in ["text/csv", "application/jsonlines"]:
raise ValueError(
f"Invalid accept_type {accept_type}."
f" Please choose text/csv or application/jsonlines."
)
self.predictor_config["accept_type"] = accept_type
if content_type is not None:
if content_type not in ["text/csv", "application/jsonlines"]:
raise ValueError(
f"Invalid content_type {content_type}."
f" Please choose text/csv or application/jsonlines."
)
self.predictor_config["content_type"] = content_type
if content_template is not None:
if "$features" not in content_template:
raise ValueError(
f"Invalid content_template {content_template}."
f" Please include a placeholder $features."
)
self.predictor_config["content_template"] = content_template
_set(custom_attributes, "custom_attributes", self.predictor_config)
_set(accelerator_type, "accelerator_type", self.predictor_config)
def get_predictor_config(self):
"""Returns part of the predictor dictionary of the analysis config."""
return copy.deepcopy(self.predictor_config)
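# Usage sketch (hypothetical model name and instance settings for the shadow endpoint):
#   model_config = ModelConfig(
#       model_name="my-trained-model",
#       instance_count=1,
#       instance_type="ml.c5.xlarge",
#       content_type="text/csv",
#       accept_type="text/csv",
#   )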
class ModelPredictedLabelConfig:
"""Config object to extract a predicted label from the model output."""
def __init__(
self,
label=None,
probability=None,
probability_threshold=None,
label_headers=None,
):
"""Initializes a model output config to extract the predicted label or predicted score(s).
The following examples show different parameter configurations depending on the endpoint:
            * Regression Task: The model returns the score, e.g. 1.2, so we don't need to specify
            anything. For JSON output, e.g. {'score': 1.2}, we can set 'label='score''.
* Binary classification:
* The model returns a single probability and we would like to classify as 'yes'
those with a probability exceeding 0.2.
We can set 'probability_threshold=0.2, label_headers='yes''.
* The model returns {'probability': 0.3}, for which we would like to apply a
threshold of 0.5 to obtain a predicted label in {0, 1}. In this case we can set
'label='probability''.
* The model returns a tuple of the predicted label and the probability.
In this case we can set 'label=0'.
* Multiclass classification:
* The model returns
{'labels': ['cat', 'dog', 'fish'], 'probabilities': [0.35, 0.25, 0.4]}.
In this case we would set the 'probability='probabilities'' and
'label='labels'' and infer the predicted label to be 'fish.'
* The model returns {'predicted_label': 'fish', 'probabilities': [0.35, 0.25, 0.4]}.
In this case we would set the 'label='predicted_label''.
* The model returns [0.35, 0.25, 0.4]. In this case, we can set
'label_headers=['cat','dog','fish']' and infer the predicted label to be 'fish.'
Args:
label (str or int): Index or JSONPath location in the model output for the prediction.
In case, this is a predicted label of the same type as the label in the dataset,
no further arguments need to be specified.
probability (str or int): Index or JSONPath location in the model output
for the predicted score(s).
probability_threshold (float): An optional value for binary prediction tasks in which
the model returns a probability, to indicate the threshold to convert the
prediction to a boolean value. Default is 0.5.
label_headers (list): List of label values - one for each score of the ``probability``.
"""
self.label = label
self.probability = probability
self.probability_threshold = probability_threshold
if probability_threshold is not None:
try:
float(probability_threshold)
except ValueError:
raise TypeError(
f"Invalid probability_threshold {probability_threshold}. "
f"Please choose one that can be cast to float."
)
self.predictor_config = {}
_set(label, "label", self.predictor_config)
_set(probability, "probability", self.predictor_config)
_set(label_headers, "label_headers", self.predictor_config)
def get_predictor_config(self):
"""Returns probability_threshold, predictor config."""
return self.probability_threshold, copy.deepcopy(self.predictor_config)
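# Usage sketch for a binary classifier whose output looks like {"probability": 0.73}
# (hypothetical key name and threshold):
#   predictions_config = ModelPredictedLabelConfig(
#       probability="probability",
#       probability_threshold=0.8,
#   )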
class ExplainabilityConfig(ABC):
"""Abstract config class to configure an explainability method."""
@abstractmethod
def get_explainability_config(self):
"""Returns config."""
return None
class SHAPConfig(ExplainabilityConfig):
"""Config class of SHAP."""
def __init__(
self,
baseline,
num_samples,
agg_method,
use_logit=False,
save_local_shap_values=True,
seed=None,
):
"""Initializes config for SHAP.
Args:
            baseline (None or str or list): None, an S3 object URI, or a list of rows (at least one)
                to be used as the baseline dataset in the Kernel SHAP algorithm. The format should
                be the same as the dataset format. Each row should contain only the feature
                columns/values and omit the label column/values. If None, a baseline will be
                calculated automatically by using K-means or K-prototypes on the input dataset.
num_samples (int): Number of samples to be used in the Kernel SHAP algorithm.
This number determines the size of the generated synthetic dataset to compute the
SHAP values.
agg_method (str): Aggregation method for global SHAP values. Valid values are
"mean_abs" (mean of absolute SHAP values for all instances),
"median" (median of SHAP values for all instances) and
"mean_sq" (mean of squared SHAP values for all instances).
use_logit (bool): Indicator of whether the logit function is to be applied to the model
predictions. Default is False. If "use_logit" is true then the SHAP values will
have log-odds units.
save_local_shap_values (bool): Indicator of whether to save the local SHAP values
in the output location. Default is True.
seed (int): seed value to get deterministic SHAP values. Default is None.
"""
if agg_method not in ["mean_abs", "median", "mean_sq"]:
raise ValueError(
f"Invalid agg_method {agg_method}." f" Please choose mean_abs, median, or mean_sq."
)
self.shap_config = {
"baseline": baseline,
"num_samples": num_samples,
"agg_method": agg_method,
"use_logit": use_logit,
"save_local_shap_values": save_local_shap_values,
}
if seed is not None:
self.shap_config["seed"] = seed
def get_explainability_config(self):
"""Returns config."""
return copy.deepcopy({"shap": self.shap_config})
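# Usage sketch (hypothetical baseline row and sample count):
#   shap_config = SHAPConfig(
#       baseline=[[35, 40000, 1]],
#       num_samples=100,
#       agg_method="mean_abs",
#   )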
class SageMakerClarifyProcessor(Processor):
"""Handles SageMaker Processing task to compute bias metrics and explain a model."""
_CLARIFY_DATA_INPUT = "/opt/ml/processing/input/data"
_CLARIFY_CONFIG_INPUT = "/opt/ml/processing/input/config"
_CLARIFY_OUTPUT = "/opt/ml/processing/output"
def __init__(
self,
role,
instance_count,
instance_type,
volume_size_in_gb=30,
volume_kms_key=None,
output_kms_key=None,
max_runtime_in_seconds=None,
sagemaker_session=None,
env=None,
tags=None,
network_config=None,
job_name_prefix=None,
version=None,
):
"""Initializes a ``Processor`` instance, computing bias metrics and model explanations.
Args:
role (str): An AWS IAM role name or ARN. Amazon SageMaker Processing
uses this role to access AWS resources, such as
data stored in Amazon S3.
instance_count (int): The number of instances to run
a processing job with.
instance_type (str): The type of EC2 instance to use for
processing, for example, 'ml.c4.xlarge'.
volume_size_in_gb (int): Size in GB of the EBS volume
to use for storing data during processing (default: 30).
volume_kms_key (str): A KMS key for the processing
volume (default: None).
output_kms_key (str): The KMS key ID for processing job outputs (default: None).
max_runtime_in_seconds (int): Timeout in seconds (default: None).
After this amount of time, Amazon SageMaker terminates the job,
regardless of its current status. If `max_runtime_in_seconds` is not
specified, the default value is 24 hours.
sagemaker_session (:class:`~sagemaker.session.Session`):
Session object which manages interactions with Amazon SageMaker and
any other AWS services needed. If not specified, the processor creates
one using the default AWS configuration chain.
env (dict[str, str]): Environment variables to be passed to
the processing jobs (default: None).
tags (list[dict]): List of tags to be passed to the processing job
(default: None). For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
network_config (:class:`~sagemaker.network.NetworkConfig`):
A :class:`~sagemaker.network.NetworkConfig`
object that configures network isolation, encryption of
inter-container traffic, security group IDs, and subnets.
job_name_prefix (str): Processing job name prefix.
            version (str): Clarify version to be used.
"""
container_uri = image_uris.retrieve("clarify", sagemaker_session.boto_region_name, version)
self.job_name_prefix = job_name_prefix
super(SageMakerClarifyProcessor, self).__init__(
role,
container_uri,
instance_count,
instance_type,
None, # We manage the entrypoint.
volume_size_in_gb,
volume_kms_key,
output_kms_key,
max_runtime_in_seconds,
None, # We set method-specific job names below.
sagemaker_session,
env,
tags,
network_config,
)
def run(self, **_):
"""Overriding the base class method but deferring to specific run_* methods."""
raise NotImplementedError(
"Please choose a method of run_pre_training_bias, run_post_training_bias or "
"run_explainability."
)
def _run(
self,
data_config,
analysis_config,
wait,
logs,
job_name,
kms_key,
experiment_config,
):
"""Runs a ProcessingJob with the Sagemaker Clarify container and an analysis config.
Args:
data_config (:class:`~sagemaker.clarify.DataConfig`): Config of the input/output data.
analysis_config (dict): Config following the analysis_config.json format.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when ``wait`` is True (default: True).
job_name (str): Processing job name.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
"""
analysis_config["methods"]["report"] = {"name": "report", "title": "Analysis Report"}
with tempfile.TemporaryDirectory() as tmpdirname:
analysis_config_file = os.path.join(tmpdirname, "analysis_config.json")
with open(analysis_config_file, "w") as f:
json.dump(analysis_config, f)
s3_analysis_config_file = _upload_analysis_config(
analysis_config_file,
data_config.s3_output_path,
self.sagemaker_session,
kms_key,
)
config_input = ProcessingInput(
input_name="analysis_config",
source=s3_analysis_config_file,
destination=self._CLARIFY_CONFIG_INPUT,
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_compression_type="None",
)
data_input = ProcessingInput(
input_name="dataset",
source=data_config.s3_data_input_path,
destination=self._CLARIFY_DATA_INPUT,
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type=data_config.s3_data_distribution_type,
s3_compression_type=data_config.s3_compression_type,
)
result_output = ProcessingOutput(
source=self._CLARIFY_OUTPUT,
destination=data_config.s3_output_path,
output_name="analysis_result",
s3_upload_mode="EndOfJob",
)
super().run(
inputs=[data_input, config_input],
outputs=[result_output],
wait=wait,
logs=logs,
job_name=job_name,
kms_key=kms_key,
experiment_config=experiment_config,
)
def run_pre_training_bias(
self,
data_config,
data_bias_config,
methods="all",
wait=True,
logs=True,
job_name=None,
kms_key=None,
experiment_config=None,
):
"""Runs a ProcessingJob to compute the pre-training bias methods of the input data.
        Computes the requested pre-training bias metrics (e.g. the fraction of examples)
        that compare the sensitive group against the other examples.
Args:
data_config (:class:`~sagemaker.clarify.DataConfig`): Config of the input/output data.
data_bias_config (:class:`~sagemaker.clarify.BiasConfig`): Config of sensitive groups.
methods (str or list[str]): Selector of a subset of potential metrics:
["`CI <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-bias-metric-class-imbalance.html>`_",
"`DPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-true-label-imbalance.html>`_",
"`KL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-kl-divergence.html>`_",
"`JS <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-jensen-shannon-divergence.html>`_",
"`LP <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-lp-norm.html>`_",
"`TVD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-total-variation-distance.html>`_",
"`KS <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-kolmogorov-smirnov.html>`_",
"`CDDL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-cddl.html>`_"].
Defaults to computing all.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when ``wait`` is True (default: True).
job_name (str): Processing job name. When ``job_name`` is not specified, if
``job_name_prefix`` in :class:`SageMakerClarifyProcessor` specified, the job name
will be composed of ``job_name_prefix`` and current timestamp; otherwise use
"Clarify-Pretraining-Bias" as prefix.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
""" # noqa E501
analysis_config = data_config.get_config()
analysis_config.update(data_bias_config.get_config())
analysis_config["methods"] = {"pre_training_bias": {"methods": methods}}
if job_name is None:
if self.job_name_prefix:
job_name = utils.name_from_base(self.job_name_prefix)
else:
job_name = utils.name_from_base("Clarify-Pretraining-Bias")
self._run(data_config, analysis_config, wait, logs, job_name, kms_key, experiment_config)
def run_post_training_bias(
self,
data_config,
data_bias_config,
model_config,
model_predicted_label_config,
methods="all",
wait=True,
logs=True,
job_name=None,
kms_key=None,
experiment_config=None,
):
"""Runs a ProcessingJob to compute the post-training bias methods of the model predictions.
        Spins up a model endpoint and runs inference over the input examples in
        's3_data_input_path' to obtain predicted labels. Computes the requested post-training bias
        metrics (e.g. accuracy, precision, recall) that compare the sensitive group against the
        other examples.
Args:
data_config (:class:`~sagemaker.clarify.DataConfig`): Config of the input/output data.
data_bias_config (:class:`~sagemaker.clarify.BiasConfig`): Config of sensitive groups.
model_config (:class:`~sagemaker.clarify.ModelConfig`): Config of the model and its
endpoint to be created.
model_predicted_label_config (:class:`~sagemaker.clarify.ModelPredictedLabelConfig`):
Config of how to extract the predicted label from the model output.
methods (str or list[str]): Selector of a subset of potential metrics:
["`DPPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dppl.html>`_"
, "`DI <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-di.html>`_",
"`DCA <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dca.html>`_",
"`DCR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dcr.html>`_",
"`RD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-rd.html>`_",
"`DAR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dar.html>`_",
"`DRR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-drr.html>`_",
"`AD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-ad.html>`_",
"`CDDPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-cddpl.html>`_
", "`TE <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-te.html>`_",
"`FT <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-ft.html>`_"].
Defaults to computing all.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when ``wait`` is True (default: True).
job_name (str): Processing job name. When ``job_name`` is not specified, if
``job_name_prefix`` in :class:`SageMakerClarifyProcessor` specified, the job name
will be composed of ``job_name_prefix`` and current timestamp; otherwise use
"Clarify-Posttraining-Bias" as prefix.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
"""
analysis_config = data_config.get_config()
analysis_config.update(data_bias_config.get_config())
(
probability_threshold,
predictor_config,
) = model_predicted_label_config.get_predictor_config()
predictor_config.update(model_config.get_predictor_config())
analysis_config["methods"] = {"post_training_bias": {"methods": methods}}
analysis_config["predictor"] = predictor_config
_set(probability_threshold, "probability_threshold", analysis_config)
if job_name is None:
if self.job_name_prefix:
job_name = utils.name_from_base(self.job_name_prefix)
else:
job_name = utils.name_from_base("Clarify-Posttraining-Bias")
self._run(data_config, analysis_config, wait, logs, job_name, kms_key, experiment_config)
def run_bias(
self,
data_config,
bias_config,
model_config,
model_predicted_label_config=None,
pre_training_methods="all",
post_training_methods="all",
wait=True,
logs=True,
job_name=None,
kms_key=None,
experiment_config=None,
):
"""Runs a ProcessingJob to compute the requested bias methods.
It computes the metrics of both the pre-training methods and the post-training methods.
        To calculate the post-training metrics, it spins up a model endpoint and runs inference
        over the input examples in 's3_data_input_path' to obtain predicted labels.
Args:
data_config (:class:`~sagemaker.clarify.DataConfig`): Config of the input/output data.
bias_config (:class:`~sagemaker.clarify.BiasConfig`): Config of sensitive groups.
model_config (:class:`~sagemaker.clarify.ModelConfig`): Config of the model and its
endpoint to be created.
model_predicted_label_config (:class:`~sagemaker.clarify.ModelPredictedLabelConfig`):
Config of how to extract the predicted label from the model output.
pre_training_methods (str or list[str]): Selector of a subset of potential metrics:
["`CI <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-bias-metric-class-imbalance.html>`_",
"`DPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-true-label-imbalance.html>`_",
"`KL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-kl-divergence.html>`_",
"`JS <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-jensen-shannon-divergence.html>`_",
"`LP <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-lp-norm.html>`_",
"`TVD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-total-variation-distance.html>`_",
"`KS <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-kolmogorov-smirnov.html>`_",
"`CDDL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-data-bias-metric-cddl.html>`_"].
Defaults to computing all.
post_training_methods (str or list[str]): Selector of a subset of potential metrics:
["`DPPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dppl.html>`_"
, "`DI <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-di.html>`_",
"`DCA <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dca.html>`_",
"`DCR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dcr.html>`_",
"`RD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-rd.html>`_",
"`DAR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-dar.html>`_",
"`DRR <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-drr.html>`_",
"`AD <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-ad.html>`_",
"`CDDPL <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-cddpl.html>`_
", "`TE <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-te.html>`_",
"`FT <https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-post-training-bias-metric-ft.html>`_"].
Defaults to computing all.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when ``wait`` is True (default: True).
job_name (str): Processing job name. When ``job_name`` is not specified, if
``job_name_prefix`` in :class:`SageMakerClarifyProcessor` specified, the job name
will be composed of ``job_name_prefix`` and current timestamp; otherwise use
"Clarify-Bias" as prefix.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
""" # noqa E501
analysis_config = data_config.get_config()
analysis_config.update(bias_config.get_config())
analysis_config["predictor"] = model_config.get_predictor_config()
if model_predicted_label_config:
(
probability_threshold,
predictor_config,
) = model_predicted_label_config.get_predictor_config()
if predictor_config:
analysis_config["predictor"].update(predictor_config)
if probability_threshold is not None:
analysis_config["probability_threshold"] = probability_threshold
analysis_config["methods"] = {
"pre_training_bias": {"methods": pre_training_methods},
"post_training_bias": {"methods": post_training_methods},
}
if job_name is None:
if self.job_name_prefix:
job_name = utils.name_from_base(self.job_name_prefix)
else:
job_name = utils.name_from_base("Clarify-Bias")
self._run(data_config, analysis_config, wait, logs, job_name, kms_key, experiment_config)
def run_explainability(
self,
data_config,
model_config,
explainability_config,
model_scores=None,
wait=True,
logs=True,
job_name=None,
kms_key=None,
experiment_config=None,
):
"""Runs a ProcessingJob computing for each example in the input the feature importance.
        Currently, only SHAP is supported as the explainability method.
Spins up a model endpoint.
For each input example in the 's3_data_input_path' the SHAP algorithm determines
feature importance, by creating 'num_samples' copies of the example with a subset
of features replaced with values from the 'baseline'.
Model inference is run to see how the prediction changes with the replaced features.
        If the model output returns multiple scores, importance is computed for each of them.
Across examples, feature importance is aggregated using 'agg_method'.
Args:
data_config (:class:`~sagemaker.clarify.DataConfig`): Config of the input/output data.
model_config (:class:`~sagemaker.clarify.ModelConfig`): Config of the model and its
endpoint to be created.
explainability_config (:class:`~sagemaker.clarify.ExplainabilityConfig`): Config of the
specific explainability method. Currently, only SHAP is supported.
            model_scores (str|int|ModelPredictedLabelConfig): Index or JSONPath location in the
model output for the predicted scores to be explained. This is not required if the
model output is a single score. Alternatively, an instance of
ModelPredictedLabelConfig can be provided.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when ``wait`` is True (default: True).
job_name (str): Processing job name. When ``job_name`` is not specified, if
``job_name_prefix`` in :class:`SageMakerClarifyProcessor` specified, the job name
will be composed of ``job_name_prefix`` and current timestamp; otherwise use
"Clarify-Explainability" as prefix.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
"""
analysis_config = data_config.get_config()
predictor_config = model_config.get_predictor_config()
if isinstance(model_scores, ModelPredictedLabelConfig):
probability_threshold, predicted_label_config = model_scores.get_predictor_config()
_set(probability_threshold, "probability_threshold", analysis_config)
predictor_config.update(predicted_label_config)
else:
_set(model_scores, "label", predictor_config)
analysis_config["methods"] = explainability_config.get_explainability_config()
analysis_config["predictor"] = predictor_config
if job_name is None:
if self.job_name_prefix:
job_name = utils.name_from_base(self.job_name_prefix)
else:
job_name = utils.name_from_base("Clarify-Explainability")
self._run(data_config, analysis_config, wait, logs, job_name, kms_key, experiment_config)
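# End-to-end sketch combining the config sketches above (hypothetical role ARN and sagemaker
# Session object; running this creates a shadow endpoint and incurs AWS charges):
#   clarify_processor = SageMakerClarifyProcessor(
#       role="arn:aws:iam::123456789012:role/SageMakerClarifyRole",
#       instance_count=1,
#       instance_type="ml.c5.xlarge",
#       sagemaker_session=session,
#   )
#   clarify_processor.run_bias(data_config, bias_config, model_config, predictions_config)
#   clarify_processor.run_explainability(data_config, model_config, shap_config)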
def _upload_analysis_config(analysis_config_file, s3_output_path, sagemaker_session, kms_key):
"""Uploads the local analysis_config_file to the s3_output_path.
Args:
analysis_config_file (str): File path to the local analysis config file.
s3_output_path (str): S3 prefix to store the analysis config file.
sagemaker_session (:class:`~sagemaker.session.Session`):
Session object which manages interactions with Amazon SageMaker and
any other AWS services needed. If not specified, the processor creates
one using the default AWS configuration chain.
kms_key (str): The ARN of the KMS key that is used to encrypt the
user code file (default: None).
Returns:
The S3 uri of the uploaded file.
"""
return s3.S3Uploader.upload(
local_path=analysis_config_file,
desired_s3_uri=s3_output_path,
sagemaker_session=sagemaker_session,
kms_key=kms_key,
)
def _set(value, key, dictionary):
"""Sets dictionary[key] = value if value is not None."""
if value is not None:
dictionary[key] = value
| UTF-8 | Python | false | false | 45,118 | py | 30 | clarify.py | 17 | 0.616827 | 0.613724 | 0 | 846 | 52.330969 | 130 |
dkristensen/IFT-6135-HW-3 | 11,089,605,577,887 | c64f1e6c77c4f1ec5701169aef61d524a6636963 | fec407156dd2ca4c345c578518dada406cede8fa | /question_2.py | 676b4cc134db573dfca5018644ad6e063f3c794f | []
| no_license | https://github.com/dkristensen/IFT-6135-HW-3 | 07768357afc047689c36803d80843cf105667b78 | d8f5b085c3455306b4c7402e41405553427b6b6b | refs/heads/master | 2020-05-16T08:41:05.170371 | 2019-04-26T12:49:01 | 2019-04-26T12:49:01 | 182,922,022 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import numpy as np
import utils
import matplotlib.pyplot as plt
from models import q2_VAE as VAE
import time
def train_epoch(model, train_dataloader, optimizer, loss_fn):
"""Train the given model for one epoch.
    Iterates once over the training dataloader, backpropagating the loss for every minibatch.
Args:
model (torch.nn.Module): Module to use for the training process
train_dataloader (torch.utils.data.DataLoader): Dataset which we use to train the model
optimizer (torch.optim.Optimizer): Optimizer for the model's parameters utilizing backprop
loss_fn (any loss function): The loss function we use to optimize the model
"""
model.train()
total_training_loss = 0
for batch_index, batch in enumerate(train_dataloader):
batch = batch[0].view(-1,1,28,28).float()
output_batch = model(batch)
loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_training_loss += loss
def validate(model,val_dataloader,loss_fn):
"""Computes the ELBO for a model.
Args:
model (torch.nn.Module): Module to use for computing the ELBO
val_dataloader (torch.utils.data.DataLoader): Dataset which we use to compute the ELBO
loss_fn (any loss function): The loss function, in our case, the ELBO, we use to check our model
Returns:
float: The ELBO of our model for the given dataset
"""
model.eval()
total_loss = 0
for batch_index, batch in enumerate(val_dataloader):
batch = batch[0].view(-1,1,28,28).float()
output_batch = model(batch)
total_loss += loss_fn(batch, output_batch, model.prev_means, model.prev_vars)
total_loss *= float(val_dataloader.batch_size) / len(val_dataloader.dataset)
return total_loss
def importance_sampling_function(model, xs, zs):
"""Computes the log probabilities of data given samples and a model
Args:
model (torch.nn.Module): Model to use for our importance sampling estimations
xs (torch.Tensor): Tensor holding real inputs from our dataset
zs (torch.Tensor): Samples of noise to use with our generator
Returns:
        list: Per-example negative log-likelihood estimates of the input data under our model
"""
M = xs.shape[0]
D = xs.shape[1]
K = zs.shape[1]
L = zs.shape[2]
    assert xs.shape[0] == zs.shape[0], "xs and zs must contain the same number of examples"
importance_probabilities = []
BCE_error = torch.nn.BCELoss(reduction='none')
with torch.no_grad():
for datum,latent_data in zip(xs,zs):
datum = datum.float()
# Run the datum through the network to set the values
model_output = model(datum.view(1,1,28,28))
# Get the parameters of the distribution from the model's encoder
est_mean, est_logvar = model.prev_means, model.prev_vars
est_var = torch.exp(est_logvar/2)
## Compute the probability for the datum ##
# Get the result of our sample
model_output = model.decode(latent_data.view(-1,L)).view(-1,28**2)
# Compare the latent sample with the gaussian our model predicts
model_prob = torch.sum(utils.compute_log_normal_density(latent_data, est_mean, est_var),dim=1)#normal_for_comparison.log_prob(latent_data)
# Compare with an isotropic gaussian for importance sample
iso_prob = torch.sum(utils.compute_log_normal_density(latent_data,torch.zeros(est_mean.shape[0]),torch.ones(est_mean.shape[0])),dim=1)
# Get the error of our sample with the datum
stacked_datum = torch.stack([datum for position in range(K)])
prob_given_latent = -torch.sum(BCE_error(model_output,stacked_datum),dim=1)
unscaled_p_datum = prob_given_latent + model_prob - iso_prob
max_p_datum = torch.max(unscaled_p_datum)
# Since all of these are log probs, we add them up and then exponentiate
p_datum = max_p_datum + torch.log(torch.sum(torch.exp(unscaled_p_datum-max_p_datum)))
            # Flip the sign so the stored value acts as a per-example negative log-likelihood
importance_probabilities.append(-p_datum)
return importance_probabilities
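# Each value returned above is a log-sum-exp over the K latent samples of
#   log p(x | z_k) + log q(z_k | x) - log N(z_k; 0, I),
# computed stably by factoring out the maximum log-weight, and then negated, so the list
# holds per-example negative log-likelihood estimates (smaller is better).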
def log_likelihood(model, dataloader, K=200):
"""Estimates the log likelihood of our model given a dataloader
using importance sampling.
Args:
model (torch.nn.Module): Our model to find the log-likhood of
dataloader (torch.utils.data.DataLoader): Dataset to test with
Returns:
float: the log likelihood estimate of our model over the dataset
"""
total_sum = 0
importance_values = []
zs_batch = torch.randn((dataloader.batch_size, K, 100))
for i, minibatch in enumerate(dataloader):
minibatch = minibatch[0]
importance_values += importance_sampling_function(model, minibatch, zs_batch[:len(minibatch)])
return torch.mean(torch.stack(importance_values))
if __name__ == "__main__":
start_time = time.time()
print("Creating Model ... ")
my_VAE = VAE()
optimizer = torch.optim.Adam(my_VAE.parameters(), lr=3e-4)
loss_fn = utils.ELBO
print("Loading Data ... ")
loaders = utils.get_BMNIST_dataloaders()
data_points = [[],[]]
print("Starting Training ... ")
for i in range(20):
train_epoch(my_VAE,loaders['train'],optimizer,loss_fn)
val_loss = validate(my_VAE,loaders['valid'],loss_fn).item()
print("Epoch: {}\t ELBO: {}".format(i+1,-val_loss))
data_points[0].append(i+1)
data_points[1].append(-val_loss)
plt.figure()
plt.plot(data_points[0],data_points[1],label='Validation ELBO')
plt.plot(data_points[0],[-96]*len(data_points[0]),label='Q2 Min Elbo')
plt.legend()
plt.show()
print("Testing Model ...")
print("Validation ELBO: {}".format(-validate(my_VAE,loaders['valid'],loss_fn)))
print("Test ELBO: {}".format(-validate(my_VAE,loaders['test'],loss_fn)))
print("Validation LL: {}".format(log_likelihood(my_VAE,loaders['valid'])))
print("Test LL: {}".format(log_likelihood(my_VAE,loaders['test'])))
print("Running Time of Script: {}".format(time.time()-start_time))
| UTF-8 | Python | false | false | 6,228 | py | 9 | question_2.py | 8 | 0.645472 | 0.635356 | 0 | 148 | 41.054054 | 150 |
sharonmaswai/edsa_analyse_project | 9,801,115,399,556 | 66c1c0f4f62ea17d0526c19bf357732ce63fa1d5 | afaad81ae999850a65fbeb8a95e8654cfe5e7c64 | /tests/test.py | 272694f35405d10e8c674f218c507e0d02bdbe87 | []
| no_license | https://github.com/sharonmaswai/edsa_analyse_project | dd08572892f0ab5bfa56b217fabe5ab5d437c233 | 0537e10ff55986b9934123760cd4168cedd54e26 | refs/heads/master | 2022-12-22T23:05:23.856239 | 2020-09-12T17:48:39 | 2020-09-12T17:48:39 | 293,762,793 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from analyse import analyse
# NOTE: `gauteng` and `dates` are assumed to be module-level test fixtures (a list of
# numbers and a list of 'YYYY-MM-DD hh:mm:ss' timestamp strings) defined alongside
# these tests; they are referenced here but not shown in this file.
def test_metrics_dict():
    assert analyse.dictionary_of_metrics(gauteng)=={'max': 39660.0, 'mean': 26244.42, 'median': 24403.5, 'min': 8842.0, 'std': 10400.01, 'var': 108160153.17}, 'Failed'
def test_summary():
    assert analyse.five_num_summary(gauteng)=={'max': 39660.0, 'median': 24403.5, 'min': 8842.0, 'q1': 18653.0, 'q3': 36372.0}, 'Failed'
def test_date_parse():
    assert analyse.date_parser(dates[:3])==['2019-11-29', '2019-11-29', '2019-11-29'], 'Failed'
    assert analyse.date_parser(dates[-3:])==['2019-11-20', '2019-11-20', '2019-11-20'], 'Failed'
rhiju/rhiju_python | 3,487,513,455,165 | 533aa35c30faf9ae91d29f3be338f5a36b0ad9b6 | 3dcc44bf8acd3c6484b57578d8c5595d8119648d | /extract_spreadscore_decoys_outfile.py | a02f2f5de4c5108b3e79c9da62314ef83e94dddd | []
| no_license | https://github.com/rhiju/rhiju_python | f0cab4dfd4dd75b72570db057a48e3d65e1d92c6 | eeab0750fb50a3078a698d190615ad6684dc2411 | refs/heads/master | 2022-10-29T01:59:51.848906 | 2022-10-04T21:28:41 | 2022-10-04T21:28:41 | 8,864,938 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
from sys import argv,exit
from os import popen, system
from os.path import basename
import string
def Help():
print
print 'Usage: '+argv[0]+' <silent out file 1> < silent file 2> ... <N> '
print ' Will extract N decoys with lowest score from each silent file.'
print ' If you want to select based on another column, say 12 (Rg), the'
print ' last arguments should be -12 <N> (for lowest Rg) or +12 <N>'
print ' (for highest Rg).'
print
exit()
if len(argv)<2:
Help()
try:
NSTRUCT = int(argv[-1])
del(argv[-1])
except:
NSTRUCT = 2
scorecol_defined = 0
try:
SCORECOL = int(argv[-1])
del(argv[-1])
scorecol_defined = 1
except:
SCORECOL = -1
REVERSE = ''
if SCORECOL > 0:
REVERSE = ' --reverse '
#Another possibility... user supplies -rms or +rms
scorecol_name_defined = 0
if not scorecol_defined:
scorecol_name = argv[-1]
if scorecol_name[0] == '-':
scorecol_name_defined = 1
scorecol_name = scorecol_name[1:]
del( argv[-1] )
REVERSE = ''
if scorecol_name[0] == '+':
scorecol_name_defined = 1
scorecol_name = scorecol_name[1:]
REVERSE = '--reverse'
del( argv[-1] )
infiles = argv[1:]
for infile in infiles:
tags = []
scoretags = string.split( popen('head -n 2 '+infile).readlines()[1] )
scoretag=''
if scorecol_defined:
scoretag = scoretags[ abs(SCORECOL) ]
if scorecol_name_defined:
assert( scoretags.count( scorecol_name ))
SCORECOL = scoretags.index( scorecol_name )
scoretag = scorecol_name
assert(infile[-3:] == 'out')
# print 'grep SCORE '+infile+' | sort -k %d -n %s | head -n %d' % (abs(SCORECOL)+1, REVERSE, NSTRUCT+1)
lines = popen('grep SCORE '+infile+' | grep -v NATIVE | sort -k %d -n %s ' % (abs(SCORECOL)+1, REVERSE)).readlines()
templist_name = 'temp.%s.list'% basename(infile)
fid = open(templist_name,'w')
count = 0
numlines = len(lines)
for n in range(NSTRUCT):
cols = string.split(lines[ numlines*n/NSTRUCT] )
tag = cols[-1]
if tag.find('desc') < 0:
fid.write(tag+'\n')
tags.append(tag)
count = count+1
if count >= NSTRUCT:
break
outfilename = infile
fid.close()
command = 'head -n 2 '+infile
system(command)
count = 1
fid = open( infile )
line = fid.readline()
writeout = 0
while line:
cols = string.split(line)
if (len(cols)>1 and cols[0]=='SCORE:'):
if tags.count(cols[-1]) > 0:
writeout = 1
else:
writeout = 0
if writeout:
print line[:-1]
line = fid.readline()
command = 'rm '+templist_name
print(command)
system(command)
| UTF-8 | Python | false | false | 2,809 | py | 340 | extract_spreadscore_decoys_outfile.py | 340 | 0.571378 | 0.553578 | 0 | 124 | 21.637097 | 120 |
vaibhav0077/To-Do-ListApp-django | 16,192,026,745,221 | 63216ed6c9a90488793c15c520bf0cf3f8b89c6f | cdc542c2ce0ff784f4593968c98e9f213757a4fa | /base/forms.py | e6a1cbd0d90d4df7efe5ef911270c769b9c3a77b | []
| no_license | https://github.com/vaibhav0077/To-Do-ListApp-django | 95e89d366627727232316a6828179ef53b737f93 | 690d2dc47cd857dc4145cd8d56a42471fb33d5bc | refs/heads/master | 2023-08-31T12:41:52.408689 | 2021-10-29T03:20:48 | 2021-10-29T03:20:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # from To_Do_List.base.models import TODO
from django.contrib.auth import forms
from django.contrib.auth import models
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms import fields, widgets, ModelForm
from django import forms
from django.forms.forms import Form
from base.models import TODO
class DateInput(forms.DateInput):
input_type = 'date'
class SignUpform(UserCreationForm):
password2 = forms.CharField(label='Confirm Password(again)',
widget=forms.widgets.PasswordInput)
class Meta:
model = User
fields = ['first_name', 'last_name', 'username', 'email']
labels = {'email': 'Email'}
class LoginUpform(UserCreationForm):
class Meta:
model = User
fields = ['username']
class TODOForm(ModelForm):
# start_date = forms.DateField(widget=DateInput)
end_date = forms.DateField(widget=DateInput)
class Meta:
model = TODO
fields = ['title', 'priority', 'end_date']
# labels = {'end_date':'End date(yyyy-mm-dd)'}
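# View-side sketch of how TODOForm is typically consumed (hypothetical view code, assuming
# any remaining TODO fields have defaults or are set by the view):
#   form = TODOForm(request.POST or None)
#   if form.is_valid():
#       form.save()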
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
# fields=['username','email']
fields = ['first_name', 'last_name', 'username', 'email']
class viewTodoForm(TODOForm):
class Meta:
model = TODO
fields = ['title', 'status', 'priority', 'end_date']
# label = {'end_date':'end_date(yyyy-mm-dd)'}
class changePassForm(forms.ModelForm):
currentPassword = forms.CharField(
label='Current Password', widget=forms.widgets.PasswordInput)
newPassword = forms.CharField(
label='New Password', widget=forms.widgets.PasswordInput)
confirmPassword = forms.CharField(
label='Confirm Password', widget=forms.widgets.PasswordInput)
class Meta:
model = User
fields = ['currentPassword', 'newPassword', 'confirmPassword']
| UTF-8 | Python | false | false | 1,981 | py | 29 | forms.py | 12 | 0.661787 | 0.661282 | 0 | 70 | 27.3 | 70 |
sunnycia/pima-classifier-tf | 6,193,342,872,386 | a75404442f13f1416762498f4dc1ec82066528a0 | ea865843b65416f60204fee03380562feedfc428 | /input_data.py | f4f2cb7319dffb17110a2abadeac7c90e95567d7 | []
| no_license | https://github.com/sunnycia/pima-classifier-tf | 55ed3d2c10fd7d4629ee7fe882dc1e619280f288 | ca2d4d9ca9557b13f7b6133f77f6111fcd129c1e | refs/heads/master | 2020-04-16T18:00:10.476301 | 2019-01-15T06:40:10 | 2019-01-15T06:40:10 | 165,798,898 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import os
def dense_to_one_hot(labels_dense, num_classes=2):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
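# Worked example: dense_to_one_hot(np.array([0, 1, 1]), num_classes=2) returns
# [[1., 0.], [0., 1.], [0., 1.]] -- one row per label with a single 1 in the label's column.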
class DataSet(object):
def __init__(self, datas, labels):
assert datas.shape[0] == labels.shape[0], (
"datas.shape: %s labels.shape: %s" % (datas.shape,
labels.shape))
self._num_examples = datas.shape[0]
self._datas = datas.astype(np.float32)
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def datas(self):
return self._datas
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
# print "A epoch is complete. Now shuffle the set and begin next epoch"
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._datas = self._datas[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
end = self._index_in_epoch
return self._datas[start:end], self._labels[start:end]
filePath = "Pima-training-set.txt"
def read_dataset(filePath):
f = open(filePath)
line = f.readline()
count = 0
labellist = []
while line:
linelist = line.split()
dat_arr = np.array(linelist)[:8]
lbl = int(linelist[8]) - 1
# print lbl;exit()
# print dat_arr.shape
if not count == 0:
data = np.concatenate((data, dat_arr[np.newaxis, ...]), axis=0)
# print type(labellist)
labellist.append(lbl)
else:
data = dat_arr[np.newaxis, ...]
labellist = [lbl]
print type(labellist),labellist;
count += 1
line = f.readline()
label = dense_to_one_hot(np.array(labellist))
# print label.shape;
# print label;exit()
perm = np.arange(len(data))
np.random.shuffle(perm)
data = data[perm]
label = label[perm]
# print data.shape
# print label.shape
return DataSet(data, label)
# read_dataset(filePath) | UTF-8 | Python | false | false | 2,934 | py | 6 | input_data.py | 4 | 0.549761 | 0.543286 | 0 | 89 | 30.988764 | 83 |
ouceduxzk/kaggle-ndsb2 | 6,330,781,822,067 | afe0b80f5d1054dd7f052672b3951f7f6f108e85 | 46cd9164abc0c7383ec26503583ca42bb3763761 | /model.py | 5f0f110600e4ce6df37bf43ce2a60390514d3459 | []
| no_license | https://github.com/ouceduxzk/kaggle-ndsb2 | ddf93add3fd03c13df9787d89d449abc8f911ee5 | 45a4af0157f8a883cd987ac4a07414480afd8ff8 | refs/heads/master | 2016-08-09T14:49:08.857944 | 2016-02-13T21:21:52 | 2016-02-13T21:21:52 | 51,666,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
def center_normalize(x):
"""
Custom activation for online sample-wise center and std. normalization
"""
return (x - K.mean(x)) / K.std(x)
def get_model():
model = Sequential()
model.add(Activation(activation=center_normalize, input_shape=(30, 64, 64)))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(96, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(96, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, W_regularizer=l2(1e-3)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='rmse')
return model
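# Usage sketch (hypothetical arrays: X of shape (n, 30, 64, 64) and y holding the
# regression targets, e.g. ventricle volumes):
#   model = get_model()
#   model.fit(X_train, y_train, batch_size=32, validation_data=(X_val, y_val))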
def VGG_16_112(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(30,112,112)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(1024, W_regularizer=l2(1e-3)))
model.add(Dense(1 , W_regularizer=l2(1e-3)))
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='rmse')
if weights_path:
model.load_weights(weights_path)
return model
def VGG_16_112_2(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(15,112,112)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(1024, W_regularizer=l2(1e-3)))
model.add(Dense(1 , W_regularizer=l2(1e-3)))
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='rmse')
if weights_path:
model.load_weights(weights_path)
return model
def VGG_16(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(30,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(1024, W_regularizer=l2(1e-3)))
model.add(Dense(1 , W_regularizer=l2(1e-3)))
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='rmse')
if weights_path:
model.load_weights(weights_path)
return model
def VGG_19(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(30,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(1000, activation='softmax'))
model.add(Dense(1 , W_regularizer=l2(1e-3)))
adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss='mse')
if weights_path:
model.load_weights(weights_path)
return model
| UTF-8 | Python | false | false | 9,600 | py | 7 | model.py | 6 | 0.656979 | 0.575313 | 0 | 262 | 35.637405 | 81 |
netmanchris/pykanyerest | 17,351,667,901,245 | c521282a6250e4ff227d53b2602557e5b2e8071a | 55d78df9898e3f698ff7abb405569d0ae4d7c3d8 | /pykanyerest/quotes.py | 531ef54ce14e3df6cadc97ee16a22e1a7e7e0437 | [
"Apache-2.0"
]
| permissive | https://github.com/netmanchris/pykanyerest | 9943229e77ca5547f744619772266f08c363069e | 223e264c9d0bb8756448c4687130d5d0f6ea2e46 | refs/heads/master | 2020-12-21T19:20:09.849010 | 2020-01-27T16:19:21 | 2020-01-27T16:19:21 | 236,533,692 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests, json
def get_new_quote():
url= 'https://api.kanye.rest'
r = requests.get(url)
quote = json.loads(r.text)
print ("New Kanye Quote coming up!")
return quote
| UTF-8 | Python | false | false | 193 | py | 3 | quotes.py | 2 | 0.637306 | 0.637306 | 0 | 8 | 23.125 | 40 |
YYN117/Demo | 4,964,982,228,490 | 40371be35d77d37a98c98ed67ab673b320edefe8 | aa28417be8935d6fa369fcb526174f9e1e30479a | /qianfeng/day11/ATM/银行自助提款机.py | 662a6a89eefcd47ab68ce4d00317d3fcdf0a1d92 | []
| no_license | https://github.com/YYN117/Demo | d6fca95ed8a1a433ef06f1f3fc2e768414e863cb | 40690040a7422fd5d8f03a0d68f20f1be5d4a836 | refs/heads/master | 2020-04-14T12:29:30.129709 | 2019-01-02T13:31:10 | 2019-01-02T13:31:10 | 163,841,708 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
人
类名:Person
属性:姓名,身份证,电话,卡
行为
卡
类名:Card
属性:卡号,密码,余额
行为
提款机
类名:ATM
属性:
行为:开户,查询,取款,存款,转账,改密码,锁定,解锁,读卡,销户,退出
界面类
类名:View
属性:
行为:管理员界面 管理员登录 系统功能界面
'''
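# Illustrative sketch only: Person and Card are described in the outline above but are
# defined elsewhere, not in this file. The classes below are reconstructed from that
# outline; the attribute names are assumptions, not the actual implementation.
class Card(object):
    """Card: card number, password, balance."""
    def __init__(self, card_id, password, balance=0):
        self.card_id = card_id
        self.password = password
        self.balance = balance

class Person(object):
    """Person: name, ID number, phone, and the card they hold."""
    def __init__(self, name, id_number, phone, card):
        self.name = name
        self.id_number = id_number
        self.phone = phone
        self.card = card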
from view import View
from atm import ATM
import time
import pickle
import os
def main():
    # user data
    allUsers = {}
    # user interface
    view = View()
    view.printAdminView()
    # administrator starts up the machine
if view.adminOption():
return -1
filepath = os.path.join(os.getcwd(), 'allusers.txt')
f = open(filepath ,'rb')
allUsers = pickle.load(f)
print(allUsers)
atm = ATM(allUsers)
while True:
view.printSysFunctionView()
        # wait for the user's choice
        option = input('Enter your operation: ')
        if option == '1':
            atm.createUser()
        elif option == '2':
            atm.searchUserInfo()
        elif option == '3':
            print('Withdraw')
        elif option == '4':
            print('Deposit')
        elif option == '5':
            print('Transfer')
        elif option == '6':
            print('Change password')
        elif option == '7':
            atm.lockUser()
        elif option == '8':
            atm.unlockUser()
        elif option == '9':
            print('Replace card')
        elif option == '0':
            print('Close account')
elif option == 't':
if not view.adminOption():
f = open(filepath,'wb')
pickle.dump(atm.allUsers,f)
f.close()
print(filepath)
return -1
time.sleep(2)
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,777 | py | 296 | 银行自助提款机.py | 276 | 0.483502 | 0.474747 | 0 | 80 | 17.575 | 56 |
nanersmariee/Labs_AnnaMarie | 5,514,738,051,288 | caf2e6aa6575548b214e5272d309b9121d49d79d | 385ba6b52414042ccc1a44fb5c143fee86a7d194 | /calculator1.py | c76e2f99649bcbaf2cc768f6627f19fc0a0341d9 | []
| no_license | https://github.com/nanersmariee/Labs_AnnaMarie | b5cdb2c95a90e35169d038c68284e154f74ee21e | 75de2327d11aa8e6963ecd7dab290ed646883477 | refs/heads/master | 2022-12-09T03:35:50.411671 | 2020-01-19T00:12:28 | 2020-01-19T00:12:28 | 216,278,357 | 0 | 0 | null | false | 2021-06-02T00:42:47 | 2019-10-19T22:30:35 | 2020-01-19T00:12:45 | 2021-06-02T00:42:45 | 16,850 | 0 | 0 | 16 | TSQL | false | false |
"""repeat forever"""
from arithmetic1 import *
"""import arithmetic functions for calculator"""
while True:
"""read input"""
input_string = input("Please enter parameters> ")
print(input_string)
"""tokenize input"""
tokens = (input_string.split(" "))
print(tokens)
"""If token is Q - quitting"""
if tokens[0] == "q":
print("You are quitting")
break
# elif len(tokens) < 2:
# print("Not enough parameters entered")
"""Assigned token indexes (operators and num)"""
if len(tokens) == 2:
operator = tokens[0]
        num1 = float(tokens[1])
num2 = 0
print(operator, num1, num2)
elif len(tokens) == 3:
operator = tokens[0]
num1 = float(tokens[1])
num2 = float(tokens[2])
if operator == "+":
result = add(num1,num2)
elif operator == "-":
result = subtract(num1,num2)
elif operator == "*":
result = multiply(num1, num2)
elif operator == "/":
result = divide(num1, num2)
elif operator == "pow" :
result = power(num1,num2)
elif operator == "mod":
result = mod(num1,num2)
print(result)
# add(float, float) → float
# Return the sum of the two inputs.
# subtract(float, float) → float
# Return the second number subtracted from the first.
# multiply(float, float) → float
# Multiply the two inputs together and return the result.
# divide(float, float) → float
# Divide the first input by the second and return the result.
# square(float) → float
# Return the square of the input.
# cube(float) → float
# Return the cube of the input.
# power(float, float) → float
# Raise the first input to the power of the second and return the result.
# mod(float, float) → float
# Divide the first input by the second input and return the remainder.
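# NOTE (illustration only): the arithmetic1 module imported at the top is not shown in
# this file. The definitions below are a minimal sketch of what it presumably provides,
# reconstructed from the spec comments above; the real module may differ.
def add(a, b):
    """Return the sum of the two inputs."""
    return a + b

def subtract(a, b):
    """Return the second number subtracted from the first."""
    return a - b

def multiply(a, b):
    """Multiply the two inputs together and return the result."""
    return a * b

def divide(a, b):
    """Divide the first input by the second and return the result."""
    return a / b

def square(a):
    """Return the square of the input."""
    return a ** 2

def cube(a):
    """Return the cube of the input."""
    return a ** 3

def power(a, b):
    """Raise the first input to the power of the second and return the result."""
    return a ** b

def mod(a, b):
    """Divide the first input by the second input and return the remainder."""
    return a % b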
# """assign 0 to token [3] if less than 2 numbers"""
# elif len(tokens) < 3:
# num2 = 0
# else:
# num2 = (tokens[2])
# elif tokens[0] == "+":
# print(add(tokens[1], tokens[2]))
# elif tokens[0] == "-":
# print(tokens[1] - tokens[2])
# else:
# # # decide which math function to call based on first token
# """A prefix-notation calculator."""
# from arithmetic1 import *
# while True:
# user_input = input("> ")
# tokens = user_input.split(" ")
# if "q" in tokens:
# print("You will exit.")
# break
# elif len(tokens) < 2:
# print("Not enough inputs.")
# continue
# operator = tokens[0]
# num1 = tokens[1]
# if len(tokens) < 3:
# num2 = "0"
# else:
# num2 = tokens[2]
# if len(tokens) > 3:
# num3 = tokens[3]
# # A place to store the return value of the math function we call,
# # to give us one clear place where that result is printed.
# result = None
# if not num1.isdigit() or not num2.isdigit():
# print("Those aren't numbers!")
# continue
# # We have to cast each value we pass to an arithmetic function from a
# # a string into a numeric type. If we use float across the board, all
# # results will have decimal points, so let's do that for consistency.
# elif operator == "+":
# result = add(float(num1), float(num2))
# elif operator == "-":
# result = subtract(float(num1), float(num2))
# elif operator == "*":
# result = multiply(float(num1), float(num2))
# elif operator == "/":
# result = divide(float(num1), float(num2))
# elif operator == "square":
# result = square(float(num1))
# elif operator == "cube":
# result = cube(float(num1))
# elif operator == "pow":
# result = power(float(num1), float(num2))
# elif operator == "mod":
# result = mod(float(num1), float(num2))
# elif operator == "x+":
# result = add_mult(float(num1), float(num2), float(num3))
# elif operator == "cubes+":
# result = add_cubes(float(num1), float(num2))
# else:
# result = "Please enter an operator followed by two integers."
# print(result)
| UTF-8 | Python | false | false | 4,266 | py | 5 | calculator1.py | 3 | 0.558118 | 0.54 | 0 | 208 | 19.408654 | 75 |
nguyenletan/python2 | 11,209,864,664,000 | c383f311b3bd331176c088dc2ad5aab1dd8f03d6 | c9540e808ac6c30c21f8a825293fa981f474aedb | /a2_task1.py | 39a71715b99202aedbd4e34553d1406bff1a71a7 | []
| no_license | https://github.com/nguyenletan/python2 | 47560f643911cf81198d37791e3845bec2e9e438 | 7b52d09a9886dd8885290950eb943cb442196e81 | refs/heads/master | 2022-10-02T02:00:49.652519 | 2020-05-21T11:35:06 | 2020-05-21T11:35:06 | 265,832,395 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Task 1 code template for FIT9136 Assignment 2.
# Instructions to students:
# 1. Where required, replace the *pass* statements with your own method and
# function definitions. Your submission must comply with the specification:
# do not rename specified functions or change the number or type of arguments
# or return values; otherwise you will be deemed not to have demonstrated
# clear comprehension of the specified instructions. (You may define
# your own additional functions and instance variables if you wish, as long
# as these don't contradict the specified requirements. You may also
# import any libraries allowed by the assignment specification.)
# 2. Complete task 1 within the framework of this template file.
# 3. Modify the filename (AND import statement(s), where required) to replace
# the xxxxxxxx with your student ID number.
# 4. Complete tasks 2 and 3 within the other template files. The finished
# program is split into three files, linked together by import statements.
# 5. In this file, you may define your own testing code within the 'main'
# block to check if your simulation is working when running this script file
# directly. Code in the main block will not be run by the auto marker algorithm,
# which will instead test the specified functions/methods by attempting to
# call them directly according to how they are defined in the assignment
# requirements specification.
# 6. Before submission, you should remove these instructions from this
# template file and add your own program comments instead. This template file
# has omitted program comments, which are your responsibility to add. Any
# other 'placeholder' comments should be removed from your final submission.
import pandas as pd
class Person:
# all with unique names => use Set instead of List
def __init__(self, first_name, last_name):
self.friends = []
self.first_name = first_name
self.last_name = last_name
def add_friend(self, friend_person):
self.friends.append(friend_person)
def get_name(self):
return self.first_name + ' ' + self.last_name
def get_friends(self):
# returns a list of Person objects for the social connections that have been added.
return list(self.friends)
def __repr__(self):
result = self.get_name() + ': '
for f in self.friends:
result += f.get_name() + ','
return result + '\n'
def __str__(self):
return self.get_name()
def get_firstname(name):
return name.split()[0]
def get_lastname(name):
return name.split()[1]
def load_people2():
# pd.set_option('display.max_rows', 1000)
df = pd.read_csv('a2_sample_set.txt',
header=None,
sep='[:]',
index_col=0,
names=['person', 'friends'],
engine='python')
# data = df.to_numpy()
friends = df['friends']
df['friends'] = friends.str.split(',')
people = {}
for index, value in df.iterrows():
person = Person(get_firstname(index), get_lastname(index))
people[person.get_name().strip()] = person
for index, value in df.iterrows():
person = people[index]
for friend_name in value[0]:
person.add_friend(people[friend_name.strip()])
print(type([v for k, v in people.items()]))
return [v for k, v in people.items()]
# def load_people():
# file = open('a2_sample_set.txt', 'r')
# for line in file:
# name_and_friends = line.split(':')
# person = Person(get_firstname(name_and_friends[0]), get_lastname(name_and_friends[0]))
# print(person)
def main():
print(load_people2())
# load_people2()
if __name__ == '__main__':
main()
# placeholder only. You may add your own testing code within this
# main block to check if the code is working the way you expect.
# do not add code here (outside the main block).
# columns = df.columns
# print(columns)
# print(df.describe())
# print(data[0])
# for d in data:
# print(d)
# print(d.str.split(','))
# friends = df['friends']
# print(friends.str.split(','))
# df['friends'] = friends.str.split(',')
# print(df)
# print(tabulate(df, tablefmt="pipe", headers="keys"))
# friends = df.friends
# print(friends)
# df.head()
# print(type(df.values))
# print(df.columns)
# print(df.loc['Jom Tones'])
# for _ in df:
# print(_)
# for _ in df:
# print(type(_))
# for i in range(2):
# print(df.iloc[i])
# for index, value in df.iterrows():
# print(index)
# print(value)
# print(type(_))
# print(type(df.iterrows()))
# for i in range(df.shape[0]):
# print(df.iloc[i])
# # For printing more than one columns
# # print(df.iloc[i, [0, 2]])
| UTF-8 | Python | false | false | 4,832 | py | 3 | a2_task1.py | 3 | 0.642798 | 0.635555 | 0 | 146 | 32.09589 | 96 |
jamesgleave/neat | 2,216,203,172,621 | f866479cc68aa20fd9a1688dd66a5ce95899f7d0 | f3f1ca77e428be31fce79108ca45dad03cb8217c | /plot_racetrack.py | 171558811e69e30f3ceb456719eeba4a7022de5f | [
"MIT"
]
| permissive | https://github.com/jamesgleave/neat | 11b7e1f6dbb33de3cfd604c4c96781546adf5f3e | f0776d1f0dc68e391b8bb97b82a42bf13605898a | refs/heads/master | 2021-09-12T08:48:47.192433 | 2018-04-15T17:50:41 | 2018-04-15T17:50:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import numpy as np
import matplotlib
#matplotlib.use('Agg',warn=False)
import matplotlib.pyplot as plt
import scipy.io
def angle(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2' """
c = np.dot(v1, v2)
s = np.linalg.norm(np.cross(v1, v2))
return np.arctan2(s, c)
def angle2(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2' with sign """
return np.arccos(np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))) * np.sign(np.cross(v1,v2))
v1 = np.array([ 1, 1])
v2 = np.array([ 1, -1])
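# Illustrative check (not in the original script): applying the two helpers above to
# the sample vectors v1 and v2 should report a 90-degree angle (pi/2), with angle2
# additionally carrying the sign of the turn.
print(angle(v1, v2), angle2(v1, v2))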
fig = plt.figure()
mat = scipy.io.loadmat('racetrack.mat')
t_l = mat['t_l']
t_r = mat['t_r']
t_m = (t_l+t_r)/2
n = 1800
print(t_l.shape)
p = np.array([10*np.random.random()-8, 10*np.random.random()+250])
idx = 0
d_mp = 1e10
while np.linalg.norm(p-t_m[idx+1,:]) <= d_mp:
mp = p - t_m[idx+1,:]
d_mp = np.linalg.norm(mp)
idx += 1
lr = t_r[idx,:] - t_l[idx,:]
lr_n = np.array([-lr[1],lr[0]])
d_lr = np.dot(mp.T,lr) / np.dot(lr,lr) * lr # rename this
d = np.linalg.norm(d_lr)
d_sign = np.sign(np.cross(mp,lr_n))
print(d*d_sign)
print( d <= 5/2. )
plt.plot(t_l[:n,0],t_l[:n,1], 'b-')
plt.plot(t_r[:n,0],t_r[:n,1], 'b-')
plt.plot(t_m[:n,0],t_m[:n,1], 'y-')
plt.plot(p[0], p[1], 'rx')
plt.plot((t_m[idx,0],p[0]),(t_m[idx,1],p[1]), 'r-')
plt.plot((t_m[idx,0],t_m[idx,0]+d_lr[0]),(t_m[idx,1],t_m[idx,1]+d_lr[1]), 'g-')
plt.plot((t_m[idx,0],t_m[idx,0]+lr_n[0]),(t_m[idx,1],t_m[idx,1]+lr_n[1]), 'c-')
# horizon old
idx = 200
l = 10 # number of points
k = 10 # distance in steps
x_red = t_m[idx:idx+(l+1)*k:k,:]
a_red = []
for i in range(l):
a_red.append(angle2(x_red[i],x_red[i+1]))
a_red = np.array(a_red)
a_red *= 100
print(x_red)
print(a_red)
# horizon new
idx = 200
n = 10 # number of points
h = 10 # distance between points in m
x_last = t_m[idx]
x_red = [x_last]
i = j = 0
while True:
while True:
i += 1
x_tmp = t_m[idx+i]
if np.linalg.norm(x_tmp-x_last) > h:
x_red.append(x_tmp)
x_last = x_tmp
break
j += 1
if j == n:
break
x_red = np.array(x_red)
a_red = []
for i in range(n):
a_red.append(angle2(x_red[i],x_red[i+1]))
a_red = np.array(a_red) # angel between horizon points
a_red *= 100/3.
print(x_red)
print(a_red)
plt.plot(x_red[:,0], x_red[:,1], 'k.')
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
#ax.set_xlim(-20,10); ax.set_ylim(230,260)
ax.set_xlim(-40,30); ax.set_ylim(190,290)
plt.show()
| UTF-8 | Python | false | false | 2,490 | py | 21 | plot_racetrack.py | 13 | 0.565863 | 0.515663 | 0 | 113 | 21.035398 | 102 |
chuanqin1230/network-science | 13,675,175,920,514 | bb1fad38392727c100693810726fffda43895602 | dcb30fc270fda8fd63c0851e29bbcd91c6726040 | /codes/louvain.py | 59d03915a620d2314a3df0055fb25971ece384f8 | []
| no_license | https://github.com/chuanqin1230/network-science | 3c6a9bfc68a9c02a850fa253bbeb0e4027e71e1d | a6797a77f3766d5f0ee70356f415a91c92cbe446 | refs/heads/master | 2022-06-30T05:50:07.560434 | 2020-05-06T03:56:02 | 2020-05-06T03:56:02 | 261,643,062 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 13:50:33 2018
@author: Jinglin
"""
import os
from pygrid import *
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn import cluster, datasets, mixture
import modification
def louvain(graph_silm,SQV):
#set default weight value
graph_silm.es['weight'] = 1.0
edge_weight = graph_silm.es['weight']
#print graph_silm.es['weight']
#get adjacency matrix of weighted graph
adjacency_matrix = graph_silm.get_adjacency(attribute='weight')
#print adjacency_matrix
#node value/weight: the edges connected to the node
node_value = []
for node in graph_silm.vs:
node_value.append(sum(adjacency_matrix[node.index]))
graph_silm.vs["node_value"] = node_value
#print node_value
#calculate total edge weight
#resolution = 1/total_edge_weight
total_edge_weight = 0.0
for i in range(graph_silm.vcount()):
total_edge_weight += sum(adjacency_matrix[i])
print total_edge_weight
'''
IGraph library.
Copyright (C) 2006-2012 Gabor Csardi <csardi.gabor@gmail.com>
334 Harvard street, Cambridge, MA 02139 USA
'''
#the level of alg
multi_level = 1 #3
resolution1 = 1.0
resolution2 = resolution1/total_edge_weight #use standard modularity function
random_starts_number = 1 #10
#New graph with all nodes from graph_silm
#Initial network each node as a cluster
copy_graph_silm = graph_silm.as_undirected()
copy_graph_silm.delete_edges(None)
#print copy_graph_silm.vcount()
#get the initial clusters, 1 node as 1 clusters
initial_clusters = copy_graph_silm.clusters()
#print initial_clusters
#node_list has the node order, node neighbors has its neighbors' list
node_list = graph_silm.vs['name']
node_neighbors = []
for i in node_list:
node_neighbors.append(graph_silm.neighbors(i))
#print node_neighbors
#Louvain begin
for level in range(multi_level):
clusters_weight = np.zeros(len(initial_clusters))
        for cluster_index, one_cluster in enumerate(initial_clusters):
            #print one_cluster
            for vertex_index in one_cluster:
                clusters_weight[cluster_index] += node_value[vertex_index]
#print copy_graph_silm.vs.select(vertex_index)['name']
#print node_value[vertex_index]
#print clusters_weight'''
#max modularity
max_modularity = -10000
#random seed 0, each time random the same value
random.seed(10)
random_value = random.random()
for i in range(random_starts_number):
if (random_starts_number > 1):
            print 'No.%d Random Start' % (i + 1)
#New graph with all nodes from graph_silm
#Initial network each node as a cluster
new_copy_graph_silm = graph_silm.as_undirected()
new_copy_graph_silm.delete_edges(None)
#print copy_graph_silm.vcount()
#get the initial clusters, 1 node as 1 clusters
new_initial_clusters = copy_graph_silm.clusters()
#print initial_clusters
j = 0
update_flag = 1 #True
#into the the multi-level loop
while ((j < multi_level) and update_flag):
if (multi_level > 1):
                print 'No.%d multilevel' % (j + 1)
#update the update_flag
update_flag = update_flag_with_random(graph_silm, new_initial_clusters, random_value)
#print update_flag
def update_flag_with_random(graph_silm, new_initial_clusters, random_value):
if (graph_silm.vcount() == 1):
return 0
update_flag = local_moving_alg(graph_silm, new_initial_clusters, random_value)
if (clustering_n_clusters < n_nodes):
update_flag2 = local_moving_alg(graph_silm, new_initial_clusters, random_value)
if (update_flag2 == 1):
update_flag = 1
clustering.merge_clusters(clustering)
    return update_flag
def local_moving_alg(graph_silm, new_initial_clusters, random_value):
if (graph_silm.vcount() == 1):
return 0
update_flag = 0
clusters_weight = np.zeros(len(new_initial_clusters))
    for cluster_index, one_cluster in enumerate(new_initial_clusters):
        #print one_cluster
        for vertex_index in one_cluster:
            clusters_weight[cluster_index] += graph_silm.vs[vertex_index]['node_value']
print list(clusters_weight)
#absent for unused clusters
#get an index of random list of node
random_node_T = []
for i in graph_silm.vs:
random_node_T.append(i.index)
#print list(random_node_T)
random.shuffle(random_node_T)
#print random_node_T
#first neighbor index
    first_neighbor_index = [0]
    temp_edges = 0
    for i in graph_silm.vs:
        # accumulate degrees (assumes an igraph-style degree() method on the graph)
        temp_edges += graph_silm.degree(i.index)
        first_neighbor_index.append(temp_edges)
stable_nodes_number = 0
index = 0
    while (stable_nodes_number < graph_silm.vcount()):
        j = random_node_T[index]
        # clustering_cluster, cluster_weight, node_weight, neighboring_cluster and
        # n_nodes_per_cluster are expected to be initialised before this loop
        n_neighboring_clusters = 0
        for k in range(first_neighbor_index[j], first_neighbor_index[j + 1]):
            l = clustering_cluster[network_neighbor[k]]
            if (edge_weight_per_cluster[l] == 0):
                neighboring_cluster[n_neighboring_clusters] = l
                n_neighboring_clusters += 1
            edge_weight_per_cluster[l] += edge_weight[k]
        cluster_weight[clustering_cluster[j]] -= node_weight[j]
        n_nodes_per_cluster[clustering_cluster[j]] -= 1
        if (n_nodes_per_cluster[clustering_cluster[j]] == 0):
            unused_cluster[n_unused_clusters] = clustering_cluster[j]
            n_unused_clusters += 1
        best_cluster = -1
        max_quality_function = 0
        for k in range(n_neighboring_clusters):
            l = neighboring_cluster[k]
            quality_function = edge_weight_per_cluster[l] - node_weight[j] * cluster_weight[l]
            if ((quality_function > max_quality_function) or ((quality_function == max_quality_function) and (l < best_cluster))):
                best_cluster = l
                max_quality_function = quality_function
            edge_weight_per_cluster[l] = 0
        if (max_quality_function == 0):
            best_cluster = unused_cluster[n_unused_clusters - 1]
            n_unused_clusters -= 1
        cluster_weight[best_cluster] += node_weight[j]
        n_nodes_per_cluster[best_cluster] += 1
        if (best_cluster == clustering_cluster[j]):
            stable_nodes_number += 1
        else:
            clustering_cluster[j] = best_cluster
            stable_nodes_number = 1
            update_flag = 1
        # advance to the next node in the shuffled order, wrapping around
        index = (index + 1) % graph_silm.vcount()
    # relabel clusters consecutively once every node is stable
    new_cluster = [0] * graph_silm.vcount()
    clustering_n_clusters = 0
    for i in range(graph_silm.vcount()):
        if (n_nodes_per_cluster[i] > 0):
            new_cluster[i] = clustering_n_clusters
            clustering_n_clusters += 1
    for i in range(graph_silm.vcount()):
        clustering_cluster[i] = new_cluster[clustering_cluster[i]]
    return update_flag
chan-p/DSP | 12,120,397,716,020 | fc0c48d15c0c34d7d2db8161998e889890972f72 | f938f9f16a7efc52991beb1dd0371b1af6e9d47e | /parse.py | e97b05529c874a8d0bd43744c9d113a2952e734c | []
| no_license | https://github.com/chan-p/DSP | 2de354251713c3040089012b234531907f7081a1 | 2f5111b5f71eb642cb8abcc8f1f1598d8efde9b1 | refs/heads/master | 2016-09-26T16:53:50.188139 | 2016-09-14T07:30:01 | 2016-09-14T07:30:01 | 68,183,914 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | f = open("/Users/TomonotiHayshi/Desktop/haihu/training.csv")
count = 0
count1 = 0
g12 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data12.csv","w")
g34 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data34.csv","w")
g56 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data56.csv","w")
g78 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data67.csv","w")
g910 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data78.csv","w")
g1112 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data910.csv","w")
g1314 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data1112.csv","w")
g1516 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data1314.csv","w")
g1718 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data1516.csv","w")
g1920 = open("/Users/TomonotiHayshi/GitHub/DSP/data/data1718.csv","w")
count=0
for aa in f:
line = aa.split(",")
if line[8] == "1" or line[9] == "1":
g12.write(aa)
if line[10] == "1" or line[11] == "1":
g34.write(aa)
if line[12] == "1" or line[13] == "1":
g56.write(aa)
if line[14] == "1" or line[15] == "1":
g78.write(aa)
if line[16] == "1" or line[17] == "1":
g910.write(aa)
if line[18] == "1" or line[19] == "1":
g1112.write(aa)
if line[20] == "1" or line[21] == "1":
g1314.write(aa)
if line[22] == "1" or line[23] == "1":
g1516.write(aa)
if line[24] == "1" or line[25] == "1":
g1718.write(aa)
if line[26] == "1" or line[27] == "1":
g1920.write(aa)
g12.close()
g34.close()
g56.close()
g78.close()
g910.close()
g1112.close()
g1314.close()
g1516.close()
g1718.close()
g1920.close()
| UTF-8 | Python | false | false | 1,636 | py | 240 | parse.py | 11 | 0.608191 | 0.490831 | 0 | 48 | 33.083333 | 70 |
HuLiangHu/Spiders | 17,325,898,078,413 | bdb84d7e70fb1fe938e3dc3669fa4207adc55a96 | cfab1c4e501d4a45e8e21e94244b878350fd032d | /DoubanSpiders/douban/spiders/movie.py | a924428b9457cbb39aaae1be644241e85b1dbbf4 | []
| no_license | https://github.com/HuLiangHu/Spiders | 85f2317f6a5342c6921dd32fa212eb0170396449 | 0e809279eff2584b73557cd9d51ad2e4c148a5cf | refs/heads/master | 2021-06-16T03:35:12.560845 | 2020-01-29T14:05:23 | 2020-01-29T14:05:23 | 182,927,044 | 1 | 1 | null | false | 2021-03-25T22:37:02 | 2019-04-23T03:52:22 | 2020-01-29T14:05:57 | 2021-03-25T22:36:59 | 64,393 | 1 | 1 | 6 | Python | false | false | # -*- coding: utf-8 -*-
import scrapy
import json
import re
import random
from datetime import datetime
from scrapy.utils.project import get_project_settings
#import pymysql
import random
#from scrapy_redis import connection
class DoubanNewTVSpider(scrapy.Spider):
name = "newtv2"
start_urls = ['https://movie.douban.com/j/new_search_subjects?sort=U&range=1,10&tags=电视剧&start=0&countries=']
apikeys = ['088acf79cc38fde819a06e6d64aaf9b8',
'01e1232b205f406405a36981611dc12c', '03405aad00de230c09c11007029a6924']
def start_requests(self):
#self.server = connection.from_settings(self.settings)
for item in ['美国','台湾','日本','韩国','英国','法国','德国','意大利','西班牙','印度','泰国','俄罗斯','伊朗','加拿大','澳大利亚','爱尔兰','瑞典','巴西','丹麦']:
yield scrapy.Request('https://movie.douban.com/j/new_search_subjects?sort=U&range=1,10&tags=电视剧&start=0&countries=%s' %item)
def parse(self, response):
subjects = json.loads(response.body_as_unicode())
for subject in subjects['data']:
yield subject
'''
url = 'https://api.douban.com/v2/movie/subject/%s?apikey=%s' % (
subject['id'], random.choice(self.apikeys))
yield scrapy.Request(url,callback = self.parse_info)
'''
if len(subjects['data'])>0:
start = int(re.search('start=(\d+)',response.url).group(1))
start = start + 20
url = re.sub('start=\d+','start=%s'%start,response.url)
yield scrapy.Request(url)
#self.server.lpush('doubanmovieinfo:start_urls', subject['id'])
def parse_info(self,response):
info = json.loads(response.body_as_unicode())
info['createdtime'] = str(datetime.now())
info['updatedtime'] = str(datetime.now())
info['_sys_collection'] = 'douban_movieinfo'
info['_sys_upset_fields'] = ['rating', 'wish_count','updatedtime']
return info | UTF-8 | Python | false | false | 2,055 | py | 244 | movie.py | 211 | 0.618024 | 0.576549 | 0 | 47 | 40.574468 | 136 |