repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
LucasXMorais/Programacao-de-Computadores | 3,298,534,889,911 | da17ab38a8c2bb5d226e6fc365c9bc7c215a39da | f4b2c225e3ec9d3210a96167570a068004ca7513 | /Python/exerc func 12.3.py | 919a2dc50b817276467c3f0e9fa253a38a3404d3 | []
| no_license | https://github.com/LucasXMorais/Programacao-de-Computadores | 21fd3a9af808065fe1a6570e6a884b1127ad57e1 | 7f17af38db6faf9ae8488c9b2c2bee2dcb408f27 | refs/heads/main | 2023-06-28T16:57:46.531931 | 2021-08-04T11:24:30 | 2021-08-04T11:24:30 | 392,659,290 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import shuffle
def shuffled(pal):
    """Return a new string containing the characters of *pal* in random order."""
    letters = list(pal)
    shuffle(letters)
    return ''.join(letters)
# Prompt the user for a word and print it with its letters randomly shuffled.
palavra = str(input("Palavra = "))
print(shuffled(palavra))
| UTF-8 | Python | false | false | 174 | py | 88 | exerc func 12.3.py | 55 | 0.643678 | 0.643678 | 0 | 7 | 22.857143 | 34 |
316141952/thc | 10,350,871,228,566 | 4e7e58cd431cf6d8e6064ee40c171610c81e6040 | d0ae88ee783239509b5f7a5411c05cba78df9a8e | /Clases/Programas/Tarea4/Problema9.py | 6bd5046b4b64d6cfdeff81a89dad8d499a320d52 | []
| no_license | https://github.com/316141952/thc | 297ffbd367bff14210a9fd3c1ad8fb1f338d94e4 | 67da15890efeee90c11380f4c549099e484e3960 | refs/heads/master | 2020-04-15T18:57:59.813773 | 2019-01-31T03:39:36 | 2019-01-31T03:39:36 | 164,932,325 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Es un programa que te da el área, perímetro y altura
# de cualquier triángulo, y el tipo de triángulo
def trian(a, b, c):
    """Classify a triangle by its side lengths.

    Returns 'Equilatero', 'Escaleno' or 'Isosceles'; returns None when any
    side is not strictly positive.  The original used `while` where an `if`
    was intended -- it only worked because every branch returned.
    NOTE(review): the triangle inequality (a + b > c, etc.) is not checked,
    matching the original behaviour.
    """
    if a <= 0 or b <= 0 or c <= 0:
        return None
    if a == b == c:
        return 'Equilatero'
    if a != b and b != c and c != a:
        return 'Escaleno'
    # Exactly two sides equal.
    return 'Isosceles'
def perimetro(a, b, c):
    """Return the perimeter of the triangle with sides a, b, c.

    Bug fix: the original `if trian(a,b,c) == 'Escaleno' or 'Isosceles':`
    was always true because the bare string 'Isosceles' is truthy.  The
    perimeter is simply the sum of the sides; 3*a for an equilateral
    triangle yields the same value and is kept for parity with the original.
    """
    if trian(a, b, c) == 'Equilatero':
        return 3 * a
    return a + b + c
import math
from math import sqrt as rc
def altura(a, b, c):
    """Return the height of the triangle relative to side *a*.

    Uses h = sqrt(3)/2 * a for the equilateral case and Heron's formula
    (h = 2*Area/a) otherwise.  Bug fix: the original condition
    `== 'Escaleno' or 'Isosceles'` was always true (truthy string literal).
    """
    if trian(a, b, c) == 'Equilatero':
        return (rc(3) / 2.00) * a
    # Heron's formula: s is the semi-perimeter.
    s = (a + b + c) / 2
    return (2.00 / a) * rc(s * (s - a) * (s - b) * (s - c))
import math
from math import sqrt as rc
def area(a, b, c):
    """Return the area of the triangle with sides a, b, c.

    Uses sqrt(3)/4 * a**2 for the equilateral case and Heron's formula
    otherwise.  Bug fix: the original condition `== 'Escaleno' or
    'Isosceles'` was always true (truthy string literal).
    """
    if trian(a, b, c) == 'Equilatero':
        return (rc(3) / 4.00) * (a ** 2)
    # Heron's formula via base*height/2, kept in the original's form.
    s = (a + b + c) / 2
    return (a * ((2.00 / a) * rc(s * (s - a) * (s - b) * (s - c)))) / 2.00
| UTF-8 | Python | false | false | 1,171 | py | 102 | Problema9.py | 70 | 0.466153 | 0.44473 | 0 | 45 | 24.622222 | 55 |
m2-farzan/BCI-Project | 13,975,823,609,289 | fb1c04b9d0d59255f6b5fc2004ee3b2cbca356ca | 0ae7ccfc3cd51c9584f5b8d9aa41cdd5f9aac99e | /preprocessing/hilbert.py | 606422dd2fca3bf5528a23f240c7647b679d6c88 | []
| no_license | https://github.com/m2-farzan/BCI-Project | 44e6e81f2646ba48af695a07e7f8faebf880fbac | 45f37e29ce14713e2a09547312fb32825280c97e | refs/heads/main | 2023-03-05T23:47:02.729985 | 2021-02-15T04:31:17 | 2021-02-15T04:31:17 | 333,011,490 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from scipy.signal import hilbert
from sklearn.base import BaseEstimator, TransformerMixin
"""
Implements Sakhavi 2018
(https://ieeexplore.ieee.org/document/8310961)
section III, sub-section B, item 2.
"""
class Hilbert(TransformerMixin, BaseEstimator):
    """Scikit-learn style transformer returning the analytic-signal envelope."""

    def __init__(self):
        pass

    def fit(self, X, y):
        """No-op fit; present only to satisfy the estimator API."""
        return self

    def transform(self, X):
        """Return the magnitude of the Hilbert analytic signal of X."""
        analytic = hilbert(X)
        return np.abs(analytic)
| UTF-8 | Python | false | false | 420 | py | 35 | hilbert.py | 17 | 0.692857 | 0.664286 | 0 | 19 | 21.105263 | 56 |
eduardosalaz/statistics | 17,583,596,124,573 | 9d9e61d241fdf27af013cc826e8601374734aab7 | dd4cf7a3eaf8380333f6a8f7c8e2ec7539c87b0c | /src/regresor.py | 6acc3858b6025cf537008852bb62b7328f6215d2 | []
| no_license | https://github.com/eduardosalaz/statistics | 395599c45cc88e1d8d134b4bae00377ed2a12d82 | e20f5ba99f8a7a9d352a2e8c3ce91940b13618e0 | refs/heads/master | 2023-07-16T03:07:15.140438 | 2020-08-25T00:49:17 | 2020-08-25T00:49:17 | 288,778,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.regressionplots import influence_plot
# importante el ultimo import
# Load the Longley macro-economic dataset.
df = pd.read_csv('../csv/longley.csv', index_col=0)
# print(df.head())
# Fit an ordinary-least-squares model via the statsmodels formula API.
est = smf.ols(formula='Employed ~ GNP', data=df).fit()
print(est.summary())
# Ordinary least squares analysis.
# Separate the axes: y = employment, x = GNP.
y = df.Employed
x = df.GNP
x = sm.add_constant(x)
# add_constant prepends an intercept column so predict() can build the
# full design matrix (intercept + GNP).
# Build the regression line over 100 evenly spaced GNP values.
x_1 = pd.DataFrame({'GNP': np.linspace(x.GNP.min(), x.GNP.max(), 100)})
# The prediction frame needs the same constant column as the fitted model.
x_1 = sm.add_constant(x_1)
# x_1 now holds the GNP grid used for plotting the fitted line.
# print(x_1)
y_pron = est.predict(x_1)
plt.scatter(x.GNP, y, alpha=0.3) # alpha controls point transparency
plt.ylim(30, 100) # clamp the y-axis range
plt.xlabel('PIB')
plt.ylabel('Tasas de Empleo')
plt.title('Ajuste de Regresion')
plt.plot(x_1.GNP, y_pron, 'r', alpha=0.9)
plt.savefig('../out/lineal_simple_gdp.png')
plt.show()
inf = influence_plot(est)
inf.savefig('../out/influencia.png')
inf.show()
# Descriptive statistics:
# leverage vs. residuals influence plot; circle size marks influence.
| UTF-8 | Python | false | false | 1,406 | py | 23 | regresor.py | 15 | 0.740214 | 0.72669 | 0 | 42 | 32.452381 | 85 |
Aniketthani/Python-Tkinter-Tutorial-With-Begginer-Level-Projects | 5,970,004,565,245 | 24774460ee3cbc3671a99666b89d3e9950f3329d | 36388a5aa2d67b857ecb9475b4126b564db20f13 | /98wikipedia_search_tool.py | b3ff8cbd8a1dc1be21a9e7c0ceb2a83d78f86c57 | []
| no_license | https://github.com/Aniketthani/Python-Tkinter-Tutorial-With-Begginer-Level-Projects | a0dce41b8588c2f4e44d215d89f7884ae79ce756 | 90744f57674cb21355bc3b9a4098a7eebd9394a9 | refs/heads/main | 2023-03-25T01:36:29.699968 | 2021-03-19T16:55:34 | 2021-03-19T16:55:34 | 343,379,127 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
#please install the library for using wikipedia api using command : pip install wikipedia
import wikipedia as wiki
# Build the main window.
root=Tk()
root.title("WikiPedia Search Tool")
root.geometry("700x675")
def clear():
    """Empty both the query entry and the results text box."""
    my_entry.delete(0,END)
    my_text.delete(0.0,END)
def search():
    """Fetch the Wikipedia page named in the entry and show its content."""
    data = wiki.page(my_entry.get())
    #clear screen
    clear()
    #output wikipedia results to textbox
    my_text.insert(0.0,data.content)
labelframe=LabelFrame(root,text="Search Wikipedia")
labelframe.pack(pady=20)
#create entry box
my_entry=Entry(root,font=("Helvetica",20),width=47)
my_entry.pack(pady=20,padx=20)
#create Text box frame
myframe=Frame(root)
myframe.pack(pady=5)
#create a vertical scrollbar
text_scroll=Scrollbar(myframe)
text_scroll.pack(side=RIGHT,fill=Y)
#create horizontal scroll bar
hor_scroll=Scrollbar(myframe,orient="horizontal")
hor_scroll.pack(side=BOTTOM,fill=X)
#create Text box (wrap disabled so the horizontal scrollbar is useful)
my_text=Text(myframe,yscrollcommand=text_scroll.set,wrap="none",xscrollcommand=hor_scroll.set)
my_text.pack(pady=5)
#wire the scrollbars to the text widget's view
text_scroll.config(command=my_text.yview)
hor_scroll.config(command=my_text.xview)
#button frame
button_frame=Frame(root)
button_frame.pack(pady=10)
search_button=Button(button_frame,text="Lookup",font=("Arial",32),fg="#3a3a3a",command=search)
search_button.grid(row=0,column=0,padx=20)
clear_button=Button(button_frame,text="Clear",font=("Arial",32),fg="#3a3a3a",command=clear)
clear_button.grid(row=0,column=1,padx=20)
# Enter the Tk event loop.
root.mainloop()
sq/Fracture | 6,253,472,411,367 | eea866b8f857efc56c7d1eb503128ae111da21a3 | 686a77ea2b032be4f1f3053599ef6c9c9b614744 | /Squared/TaskLib/TaskLibTests/data/test.py | 298e430c6e43b13d7ca071568fcfc4143f1fb2dc | [
"MIT"
]
| permissive | https://github.com/sq/Fracture | 4fd213f6de12735ceec212c718dc9f753d4e8431 | 4639451ce854797eda8bf89be2f6224a25fca0d7 | refs/heads/master | 2021-03-16T08:31:55.238996 | 2021-03-11T16:21:14 | 2021-03-11T16:29:32 | 1,593,016 | 28 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | import util
import types
from xml.sax.handler import *
# Token type codes; the first element of every ParseState.token tuple.
StartDocument = 0
EndDocument = 1
StartElement = 2
EndElement = 3
IgnorableWhitespace = 4
Characters = 5
def storeInDict(target, key, value):
    """Store *value* under *key* in the mapping *target* (default storer)."""
    target[key] = value
def storeInExistingAttribute(target, key, value, converter=None):
    """Set attribute *key* on *target* only if it already exists.

    When *converter* is truthy it is applied to *value* first.
    """
    if not hasattr(target, key):
        return
    setattr(target, key, converter(value) if converter else value)
def storeInAttribute(target, key, value):
    """Unconditionally set attribute *key* on *target* to *value*."""
    setattr(target, key, value)
def readAttributes(target, attributes):
    """Copy every SAX attribute onto *target* as a plain Python attribute."""
    for name in attributes.getNames():
        setattr(target, name, attributes[name])
def stripWhitespace(text):
    """Collapse a multi-line string into one space-joined, trimmed line."""
    trimmed = (line.strip() for line in text.replace('\r', '').split('\n'))
    return ' '.join(trimmed).strip()
class Return(object):
    """Sentinel yielded by a handler generator to pass *value* to its parent."""
    def __init__(self, value=None):
        self.value = value
class ParseState(object):
    """Shared state for the coroutine-based SAX parse.

    `current` is the active handler generator, `stack` holds suspended
    handlers, `token` is the most recent SAX event tuple, and `result`
    carries a value being returned from a finished handler to its parent.
    """
    def __init__(self, output):
        self.token = None
        self.output = output
        self.current = None
        self.result = None
        self.stack = []
    def fillDictionary(self, target, storer = storeInDict):
        """Handler: collect child elements as (tag, text) pairs into *target*.

        Tracks nesting depth and finishes when the enclosing element closes
        (depth drops below zero).  *storer* decides how a pair is recorded.
        """
        depth = 0
        key = None
        buffer = None
        while True:
            yield
            type = self.token[0]
            if type == StartElement:
                depth += 1
                key = self.token[1]
                buffer = ""
            elif type == EndElement:
                if key != None:
                    storer(target, key, buffer)
                key = None
                buffer = None
                depth -= 1
                if depth < 0:
                    break
            elif type == IgnorableWhitespace:
                if key != None:
                    buffer += self.token[1]
            elif type == Characters:
                if key != None:
                    buffer += self.token[1]
    def readContent(self, handler, result=None):
        """Handler: accumulate text until the current element closes.

        *handler* is either a one-element list (written in place) or a
        callable invoked with the collected text.  When *result* is truthy
        it is handed back to the parent handler via a Return sentinel.
        """
        buffer = ""
        while True:
            type = self.token[0]
            if type == IgnorableWhitespace:
                buffer += self.token[1]
            elif type == Characters:
                buffer += self.token[1]
            elif type == EndElement:
                break
            yield
        if isinstance(handler, types.ListType):
            handler[0] = buffer
        else:
            handler(buffer)
        if result:
            yield Return(result)
    def readInts(self, buffer):
        """Handler: parse whitespace-separated integers into *buffer* (a list)."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(int, values))
        return self.readContent(myHandler)
    def readFloats(self, buffer):
        """Handler: parse whitespace-separated floats into *buffer* (a list)."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(float, values))
        return self.readContent(myHandler)
class ColladaParseHandler(ContentHandler):
    """SAX ContentHandler that drives a stack of generator-based handlers.

    Each SAX callback is converted into a token tuple and fed to the
    current handler generator; handlers delegate by yielding a child
    generator and finish by returning (StopIteration) or yielding Return.
    Note: this is Python 2 code (print statements, `except Exception, e`).
    """
    def __init__(self, state):
        self.state = state
        self.state.stack = []
        # state.main() must exist and produce the root handler generator.
        self.state.current = self.state.main()
        self._debug = False
    def _getDebugState(self):
        """Return a one-line description of the token and handler stack."""
        return "TOKEN: %r STACK: %s" % (self.state.token, ",".join([util.lifetime.best_repr(x) for x in self.state.stack]))
    def _push(self, handler):
        """Suspend the current handler and make *handler* active."""
        if self._debug:
            print "+%s" % (util.lifetime.best_repr(handler),)
        self.state.stack.append(self.state.current)
        self.state.current = handler
        # Guard against runaway delegation (e.g. a handler yielding itself).
        if len(self.state.stack) > 512:
            raise Exception("Handler stack overflow", self._getDebugState())
    def _pop(self):
        """Discard the current handler and resume its parent (or None)."""
        if self._debug:
            print "-%s" % (util.lifetime.best_repr(self.state.current),)
        if len(self.state.stack) > 0:
            self.state.current = self.state.stack.pop()
        else:
            self.state.current = None
    def _step(self):
        """Advance the current handler once.

        Returns True when the token has been consumed, False when a child
        handler was pushed and the same token must be offered again.
        NOTE(review): the value of send() is discarded here, so a handler
        resumed with a result cannot immediately delegate -- confirm this
        is intended.
        """
        newHandler = None
        try:
            if self.state.result != None:
                self.state.current.send(self.state.result)
                self.state.result = None
            else:
                newHandler = self.state.current.next()
        except StopIteration:
            # Handler finished normally; resume its parent.
            self._pop()
        except Exception, e:
            if self._debug:
                print e, self._getDebugState()
            raise
        if newHandler != None:
            if isinstance(newHandler, Return):
                # Handler returned a value: deliver it to the parent.
                self.state.result = newHandler.value
                self._pop()
                return True
            else:
                # Handler delegated: re-offer this token to the child.
                self._push(newHandler)
                return False
        else:
            return True
    def token(self, type, *args):
        """Record a SAX event and step handlers until it is consumed."""
        self.state.output._tokensRead += 1
        t = (type,) + args
        self.state.token = t
        if (type == StartElement) and self._debug:
            print "<" + args[0] + ">"
        elif (type == EndElement) and self._debug:
            print "</" + args[0] + ">"
        while True:
            r = self._step()
            if r:
                break
    def startDocument(self):
        self.token(StartDocument)
    def endDocument(self):
        self.token(EndDocument)
    def startElement(self, name, attributes):
        self.state.output._elementsRead += 1
        self.token(StartElement, name, attributes)
    def endElement(self, name):
        self.token(EndElement, name)
    def ignorableWhitespace(self, whitespace):
        self.token(IgnorableWhitespace, whitespace)
    def characters(self, content):
        self.token(Characters, content)
dimsumlabs/dootdoorlock | 13,451,837,612,066 | 19648da3b89b27de666077fdb8fe7f573562d18c | c89fa3718497d74593aedaeded66b55ea9b495dd | /bin/doord | 6aa86da8a4542dfd4841456463d8d85087512a40 | []
| no_license | https://github.com/dimsumlabs/dootdoorlock | bf85713d40638b1eeb5b4b423ca8362a43553617 | 8e079c6cce5e2571a7cbca49350f64be3575db73 | refs/heads/master | 2020-12-25T17:26:24.070725 | 2016-08-17T11:57:09 | 2016-08-17T11:57:09 | 8,122,590 | 1 | 0 | null | false | 2014-07-07T10:09:18 | 2013-02-10T12:47:13 | 2014-04-03T07:52:46 | 2014-07-07T10:07:58 | 329 | 4 | 2 | 1 | Python | null | null | #!/usr/bin/env python3
from threading import Timer
import RPi.GPIO as GPIO
import argparse
import zmq
from dsldoor import create_server_socket
DOOR_PIN = 17  # BCM-numbered GPIO pin that drives the door strike
SOCK_PATH = 'tcp://127.0.0.1:9000'  # ZeroMQ endpoint this daemon subscribes on
class Door(object):
    """Drives the door-strike relay on DOOR_PIN, auto-closing after a delay."""

    def __init__(self, open_seconds=5):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(DOOR_PIN, GPIO.OUT)
        # Bug fix: the parameter was previously ignored (hard-coded to 5).
        self.open_seconds = open_seconds
        # Hack to run cancel without conditionals
        self.timer = Timer(0, lambda: None)

    def open(self):
        """Energise the strike and (re)schedule an automatic close."""
        GPIO.output(DOOR_PIN, True)
        # Cancel any pending close so repeated opens extend the window.
        self.timer.cancel()
        self.timer = Timer(self.open_seconds, self.close)
        self.timer.start()

    def close(self):
        """De-energise the strike."""
        GPIO.output(DOOR_PIN, False)
        print('Closed door')
if __name__ == '__main__':
    # argparse is used only so --help works; there are no real options.
    parser = argparse.ArgumentParser(description='DSL door opening daemon')
    parser.parse_args()
    # Subscribe to OPEN/CLOSE events published on the local ZeroMQ socket.
    ctx, socket = create_server_socket(sock_path=SOCK_PATH)
    socket.setsockopt(zmq.SUBSCRIBE, b'OPEN')
    socket.setsockopt(zmq.SUBSCRIBE, b'CLOSE')
    door = Door()
    # Block forever, dispatching each received event to the Door.
    while True:
        evt = socket.recv().decode('utf8')
        if evt == 'OPEN':
            door.open()
        elif evt == 'CLOSE':
            door.close()
| UTF-8 | Python | false | false | 1,181 | 8 | doord | 5 | 0.6105 | 0.596105 | 0 | 54 | 20.87037 | 75 |
|
theTechieGal/Virtual-Doc | 10,866,267,261,543 | 488cc1048aac3d26ebb8541e306cf2db873a90a7 | 1c351fe38b5eb3da9a157a635e07ab7e00783634 | /jsonify.py | 8e7985faec547b61ea34b89b71e7d474a1f01e9c | []
| no_license | https://github.com/theTechieGal/Virtual-Doc | 5497474a8db44efd3c604389e8731a216df777f7 | 4c541df8c9671c56b103fbccb61f791189e55df5 | refs/heads/master | 2022-12-18T23:13:22.569929 | 2020-10-04T04:30:50 | 2020-10-04T04:30:50 | 287,530,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Script to convert the disease dataset from a CSV format to a JSON format
if __name__ == "__main__" :
    # Open the two files
    fcsv = open('diseases.csv', 'r')
    fjson = open('intents.json', 'r+')
    # Move the file pointer to the end of the JSON file
    fjson.seek(0, 2)
    pos = fjson.tell()
    # Check if the file is already ready for use
    # NOTE(review): 1230 is presumably the exact byte size of the pristine
    # intents.json template; running twice is prevented by this check -- verify.
    if pos != 1230 :
        exit(0)
    # Move the file pointer back to overwrite the end of the JSON object
    # (the final bytes of the template, so new entries can be appended).
    fjson.seek((pos - 4), 0)
    fjson.write(",\n")
    # Read all the diseases in the CSV file, skipping the comment on the first line
    diseases = fcsv.readlines()
    diseases = diseases[1:]
    # Iterate over all the disease entries
    for disease in diseases :
        # Separate the disease entry into its components and remove all extra whitespaces
        # (maxsplit=2 keeps the symptom list, which itself contains commas, intact)
        elements = disease.split(',', 2)
        for i in range (3) :
            elements[i] = elements[i].strip()
        # Write formatted text to the JSON file
        fjson.write("\t{{\"tag\": \"{}\",\n".format(elements[1]))
        fjson.write("\t \"patterns\": [")
        # Get all the listed symptoms from the disease entry
        symptoms = elements[2][1:-1].split(',')
        # Iterate over the symptoms and write them into the JSON file
        for symptom in symptoms :
            symptom = symptom.strip()
            fjson.write("\"{}\", ".format(symptom))
        # Move the file pointer back to overwrite the extra ','
        pos = fjson.tell()
        fjson.seek((pos - 2), 0)
        fjson.write("],\n")
        # Write formatted text to the JSON file
        fjson.write("\t \"responses\": [\"I can't say for certain, but this might be a symptom of {d}\", \"I'm not sure, but it is possible that you might have {d}\", \"Based on that information, it could turn out to be a case of {d}\"],\n".format(d = elements[1]))
        fjson.write("\t \"context\": [\"\"]\n")
        fjson.write("\t},\n")
    # Move the file pointer back to overwrite the extra ',' with the end of JSON object
    pos = fjson.tell()
    fjson.seek((pos - 2), 0)
    fjson.write("\n]}")
    # Close both the files
    fcsv.close()
    fjson.close()
| UTF-8 | Python | false | false | 1,974 | py | 14 | jsonify.py | 6 | 0.654002 | 0.643364 | 0 | 63 | 30.333333 | 259 |
daniel-men/python_utility_functions | 19,138,374,306,525 | c6ef654682515195b8cd86f1a7b2b0392f66ced2 | 7243890b24f4af46af4af016be198a0781c26cd0 | /python_utility_functions/image_utilities.py | 6a4ac1401736f46f983d2e2bcaf39ab9a04f7cca | [
"MIT"
]
| permissive | https://github.com/daniel-men/python_utility_functions | 17f8d2e6d4f098bc91368529de38bab309d41df7 | b627162c884f72a33412cc32a311615287b6df53 | refs/heads/main | 2023-08-27T12:59:52.178163 | 2021-10-25T14:33:00 | 2021-10-25T14:33:00 | 414,167,229 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List, Tuple
from PIL import Image
import numpy as np
import cv2
import pydicom
def convert_to_grayscale(image: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale, rescaled to the input's 8-bit range.

    Bug fix: the original built a uint8 copy (`_image`) but then converted
    the untouched input, and scaled to scale()'s default [0, 1] range before
    the uint8 cast, collapsing the data to {0, 1}.  Scale to [0, 255] first
    and convert the scaled copy instead.
    """
    _image = np.array(scale(image, new_min=0, new_max=255), dtype=np.uint8)
    grayscale = cv2.cvtColor(src=_image, code=cv2.COLOR_RGB2GRAY)
    return scale(grayscale, new_min=np.min(_image), new_max=np.max(_image))
def load_grayscale_image(path: str) -> np.ndarray:
    """Load the image at *path* converted to 8-bit grayscale ('L' mode)."""
    grayscale = Image.open(path).convert('L')
    return np.array(grayscale)
def resize_image(image: np.ndarray, new_size: Tuple[int, int]) -> np.ndarray:
    """Resize *image* with OpenCV, preserving the caller's dtype.

    new_size is (width, height), per cv2.resize convention.
    """
    resized = cv2.resize(image.astype(np.uint8), new_size)
    return resized.astype(image.dtype)
def load_images_grayscale(image_paths: List[str]) -> np.ndarray:
    """Stack the grayscale images found at *image_paths* into one array."""
    loaded = [load_grayscale_image(path) for path in image_paths]
    return np.array(loaded)
def load_image_data_from_dicom(dicom_path: str) -> np.ndarray:
    """Return the pixel data stored in the DICOM file at *dicom_path*."""
    dataset = pydicom.dcmread(dicom_path)
    return dataset.pixel_array
def load_dicom_images(dicom_paths: List[str]) -> np.ndarray:
    """Load the pixel arrays of every DICOM file in *dicom_paths*."""
    arrays = [load_image_data_from_dicom(p) for p in dicom_paths]
    return np.array(arrays)
def save_image(image: np.ndarray, path: str):
    """Write *image* to *path* via Pillow (format inferred from extension)."""
    pil_image = Image.fromarray(image)
    pil_image.save(path)
def scale(data: np.ndarray, new_min=0, new_max=1) -> np.ndarray:
    """Linearly rescale *data* so its min/max map to new_min/new_max."""
    lo = np.min(data)
    hi = np.max(data)
    span = new_max - new_min
    return span * ((data - lo) / (hi - lo)) + new_min
def create_roi2D(image: np.ndarray, x: int, y: int, roi_size: int) -> np.ndarray:
    """Return a square region of interest centred at (x, y).

    *x* is the column index and *y* the row index; the side of the ROI is
    2 * (roi_size // 2).
    """
    half = roi_size // 2
    return image[y - half:y + half, x - half:x + half]
higor-gomes93/curso_programacao_python_udemy | 5,763,846,117,085 | 6ce654c01df8ef18a4ca8e5101ae6f5b97744049 | f6b9212ff906cdca5fdf422654920293a3ef1050 | /Sessão 4 - Exercícios/ex31.py | 7ff19c37a6f593257d3a602dfec66832d9681cb7 | []
| no_license | https://github.com/higor-gomes93/curso_programacao_python_udemy | bc8e83276c7b2285d231eb86b69da9fa2c8e8f2d | 2840a007177691e277b3f3124a0b05e60f8bbaff | refs/heads/main | 2023-03-29T03:34:15.154161 | 2021-03-20T18:35:22 | 2021-03-20T18:35:22 | 349,805,994 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Leia um número inteiro e imprima o seu antecessor e o seu sucessor.
'''
# Read an integer and print its predecessor and successor.
num_int = int(input("Insira um numero inteiro: "))
print(f"O seu antecessor eh {num_int - 1} e o seu sucessor eh {num_int + 1}")
daianasousa/atividade_ifpi | 8,976,481,652,450 | 687bb9e28e211946555e7a1428f6b83bb0e81078 | 952b202f37e737df707bc1d9e024a70b3093e704 | /questao_02_letra_b.py | f5e88fb65d50b3010ded1f68925ba25a241adfc6 | []
| no_license | https://github.com/daianasousa/atividade_ifpi | 0e1c501a7d8ffc3bfd8c2718cbdd23fec1d2e26a | 587cc00a969f3b05df7f585a4548a43d436558d0 | refs/heads/master | 2022-12-14T00:33:18.017734 | 2020-09-07T20:41:21 | 2020-09-07T20:41:21 | 287,811,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""Questão_2_Letra_B.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1CbZzQflczZEGDTFofibl0c0Lf3fUvOGG
"""
def media(num_1, num_2, num_3, media):
    # NOTE(review): never called anywhere in this file; it returns its four
    # arguments unchanged and looks like leftover scaffolding -- confirm
    # before removing.
    return num_1, num_2, num_3, media
def main():
    """Read three integers and print their arithmetic mean (one decimal)."""
    valores = [int(input('Digite um número: ')) for _ in range(3)]
    media = (valores[0] + valores[1] + valores[2]) / 3
    print(f'A média dos números é: {media:.1f}')


if __name__=='__main__':
    main()
SHDShim/PMatRes | 9,646,496,582,556 | 12d251cdb1858e273437696d526f31122f8efce7 | e3800198178af5c61dfaa6af4ac1a83a7cc0b222 | /Demo_XRD_patterns_from_dpp/utils/__init__.py | 64ba84fc0972a6ce51a26febeb2110f7644834f1 | [
"Apache-2.0"
]
| permissive | https://github.com/SHDShim/PMatRes | 97a3895b64ba5bb8ed56ce10bbbae6a8e3cf925b | 92440c11f2723861dbb82cecdc321fcef9de4443 | refs/heads/master | 2020-05-09T15:44:13.209054 | 2019-04-16T04:31:34 | 2019-04-16T04:31:34 | 181,243,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .pyqtutils import undo_button_press, SpinBoxFixStyle
from .fileutils import samefilename, extract_filename, make_filename, \
get_sorted_filelist, find_from_filelist, writechi, readchi, \
extract_extension, change_file_path
from .dialogs import dialog_savefile, ErrorMessageBox, InformationBox
from .excelutils import xls_ucfitlist, xls_jlist
from .physutils import convert_wl_to_energy
| UTF-8 | Python | false | false | 399 | py | 14 | __init__.py | 6 | 0.804511 | 0.804511 | 0 | 7 | 56 | 71 |
stuartlangridge/canute | 4,801,773,444,946 | cc334c4e8c409907fa501f78c81f768503b469bc | 70dafb6035e9006af8640bad04f08e207ab40e2e | /canute2.py | fdc4cabab6520154d54195aa95928fb1d4fee7e6 | []
| no_license | https://github.com/stuartlangridge/canute | 9249c5307042167271173f6e064daa01a3d2ea1d | 5b1a5495f7428384eee7628a4ee36ff875a4a720 | refs/heads/master | 2020-05-21T15:00:08.713264 | 2019-10-11T14:59:14 | 2019-10-11T14:59:14 | 65,857,521 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
Canute
A simple desktop launcher which runs plugins at runtime and doesn't index
"""
import sys, os, random, json, time, signal
from PyQt5.QtWidgets import QApplication, QDesktopWidget
from PyQt5.QtQml import QQmlApplicationEngine, QQmlEngine, QQmlComponent
from PyQt5.QtCore import (QAbstractListModel, Qt, pyqtSlot, QDir,
QSortFilterProxyModel, QProcess, QTimer, QModelIndex)
from PyQt5.QtGui import QKeySequence
from shortyQt import Shorty
# Make sure Ctrl-C quits
# https://coldfix.de/2016/11/08/pyqt-boilerplate/
# This is bullshit that I have to do this, to be clear, Qt people.
# You are horrible and you should feel guilty every day.
# Doing this also stops Qt swallowing redirected stdout.
def setup_interrupt_handling():
    """Install a SIGINT handler so Ctrl-C quits the Qt application."""
    signal.signal(signal.SIGINT, _interrupt_handler)
    # A periodic no-op timer lets the Python interpreter run regularly,
    # giving the signal handler a chance to fire while Qt's loop is busy.
    safe_timer(50, lambda: None)
def _interrupt_handler(signum, frame):
    # SIGINT handler: shut the Qt event loop down cleanly.
    QApplication.quit()
def safe_timer(timeout, func, *args, **kwargs):
    """Call func(*args, **kwargs) every *timeout* ms without overlap.

    Unlike a repeating QTimer, the next shot is only scheduled after the
    current call finishes, so invocations can never pile up.
    """
    def tick():
        try:
            func(*args, **kwargs)
        finally:
            QTimer.singleShot(timeout, tick)
    QTimer.singleShot(timeout, tick)
class SortedSearchResults(QSortFilterProxyModel):
    """First we sort the results by score..."""
    @pyqtSlot(str)
    def search_input(self, q):
        # Forward the query string to the underlying SearchResults model.
        return self.sourceModel().search_input(q)
    @pyqtSlot(int)
    def invoke(self, row_index):
        # Resolve the (sorted) proxy row to its data: build a dict of
        # role-name -> value from the source model's roles, then invoke it.
        sm = self.sourceModel()
        qmi = self.index(row_index, 0, QModelIndex())
        data = dict([(role[1], self.data(qmi, role[0])) for role in sm._roles.items()])
        return sm.invoke_data(data)
class ShortenedSearchResults(QSortFilterProxyModel):
    """...and then we filter them to the first ten only.
    This is done in two separate models because you can't filter
    and sort in one model; the filterAcceptsRow function gets told
    the index of this result in the underlying source model, before
    sorting, and you can't get the index _after_ sorting because
    sorting isn't done until after filtering!"""
    @pyqtSlot(str)
    def search_input(self, q):
        # Forward the query down the proxy chain.
        return self.sourceModel().search_input(q)
    @pyqtSlot(int)
    def invoke(self, row_index):
        # Delegate invocation to SortedSearchResults.invoke.
        self.sourceModel().invoke(row_index)
    def filterAcceptsRow(self, source_row_idx, source_parent):
        #index = self.sourceModel().index(source_row_idx, 0, source_parent)
        #data = repr([self.sourceModel().data(index, Qt.UserRole + k) for k in range(1,6)])
        #print("filtering? sri=%s, ir=%s, name=%s, score=%s" % (
        #    source_row_idx, index.row(),
        #    self.sourceModel().data(index, Qt.UserRole + 1)[:20],
        #    self.sourceModel().data(index, Qt.UserRole + 3)))
        # Drop everything past the first ten source rows.
        if source_row_idx > 10:
            #print("Removing %s at index %s" % (self.sourceModel().data(index, Qt.UserRole + 1), source_row_idx))
            return False
        return super(ShortenedSearchResults, self).filterAcceptsRow(
            source_row_idx, source_parent)
class SearchResults(QAbstractListModel):
    """List model of plugin search results; queries plugins as subprocesses."""
    # Custom roles exposed to QML for each result entry.
    NameRole = Qt.UserRole + 1
    KeyRole = Qt.UserRole + 2
    ScoreRole = Qt.UserRole + 3
    IconRole = Qt.UserRole + 4
    DescriptionRole = Qt.UserRole + 5
    PluginRole = Qt.UserRole + 6
    IconInvertRole = Qt.UserRole + 7
    _roles = {NameRole: b"name", KeyRole: b"key", ScoreRole: b"score",
              IconRole: b"icon", DescriptionRole: b"description",
              PluginRole: b"plugin", IconInvertRole: b"inverted_icon"}
    def __init__(self):
        super(SearchResults, self).__init__()
        # Plugins are the executable files in ./plugins next to this script.
        self.plugin_dir = QDir(os.path.join(os.path.split(__file__)[0], "plugins"))
        self.plugin_dir.setFilter(QDir.Files | QDir.Executable)
        self._results = []
        self._processes = []
    def add_results(self, results):
        """Append *results* (list of dicts) and notify attached views."""
        # We need to add to our internal list, and then fire events so
        # everyone knows we've added it
        # It doesn't need to be added in order; the SortedShortenedSearchResults
        # model takes care of that
        first = len(self._results)
        last = len(self._results) + len(results) - 1
        self.beginInsertRows(QModelIndex(), first, last),  # NOTE(review): stray trailing comma (harmless tuple expression)
        self._results += results
        self.endInsertRows()
    def process_finished(self, process, plugin, q, exitcode, exitstatus):
        """Collect a finished plugin's JSON output and merge its results."""
        if exitstatus == QProcess.CrashExit:
            if exitcode == 9:
                # SIGKILL, meaning we killed it because a new query came in
                return
            stderr = str(process.readAllStandardError(), encoding="utf-8")
            print("Plugin error (%s) from %s for query '%s'\n%s\n" % (
                exitcode, plugin, q, stderr))
            return
        stdout = str(process.readAllStandardOutput(), encoding="utf-8")
        #print("from plugin", plugin, "query", q, "\n", stdout[:150])
        try:
            j = json.loads(stdout)
        except:
            print("JSON load error from plugin %s for query '%s'\n'%s'" % (
                plugin, q, stdout[:50]))
            return
        results = j.get("results")
        if results:
            r = results[:10] # never any point passing more than 10
            # Tag each result with its originating plugin so invoke works.
            for rr in r: rr["plugin"] = plugin
            self.add_results(r)
    def query_plugin(self, plugin, q):
        """Launch one plugin executable with --query and track the process."""
        p = QProcess(self)
        self._processes.append(p)
        p.start(plugin, ["--query", q])
        p.finished.connect(lambda ec, es: self.process_finished(p, plugin, q, ec, es))
    def query_plugins(self, q):
        """Fan the query out to all plugins (or just one if named first)."""
        plugin_list = self.plugin_dir.entryList()
        # if the first word is actually the name of a plugin, then
        # run that one only
        words = q.split()
        if len(words) > 1:
            for p in plugin_list:
                base = os.path.splitext(os.path.basename(p))[0]
                if base == words[0]:
                    self.query_plugin(self.plugin_dir.filePath(p), " ".join(words[1:]))
                    return
        for p in plugin_list:
            self.query_plugin(self.plugin_dir.filePath(p), q)
    def update(self, q):
        """Reset results, kill in-flight plugin processes, re-query."""
        self.beginResetModel()
        self._results = []
        while self._processes: self._processes.pop(0).kill()
        # Queries shorter than three characters are ignored.
        if q.strip() and len(q.strip()) > 2: self.query_plugins(q)
        self.endResetModel()
    @pyqtSlot(str)
    def search_input(self, q):
        """QML entry point: run a new search for *q*."""
        self.update(q)
    def invoke_data(self, data):
        """Execute a chosen result: special_* keys run locally, else plugin."""
        print("invoking", data)
        if data[b"key"].startswith("special_") and hasattr(self, "execute_%s" % data[b"key"]):
            getattr(self, "execute_%s" % data[b"key"])()
            return
        p = QProcess(self)
        p.start(data[b"plugin"], ["--invoke", data[b"key"]])
    def execute_special_restart(self):
        """Replace this process with a fresh copy of itself (restart)."""
        executable = sys.executable
        args = sys.argv[:]
        args.insert(0, sys.executable)
        time.sleep(1)
        print("Respawning")
        os.execvp(executable, args)
    def rowCount(self, parent=None, *args, **kwargs):
        # Number of result rows currently held.
        return len(self._results)
    def data(self, QModelIndex, role=None):
        """Return the value of *role* for the result at the given index."""
        row = QModelIndex.row()
        if role:
            rolename = self._roles.get(role)
            if rolename:
                val = self._results[row].get(rolename.decode("utf-8"))
                return val
            return
    def roleNames(self):
        # Expose role-number -> name mapping for QML property access.
        return self._roles
def reveal(win):
    """Toggle the launcher window: hide when visible, show and focus otherwise."""
    if win.isVisible():
        print("Super pressed while visible; hiding")
        win.setVisible(False)
        return
    print("Super pressed; showing")
    win.setVisible(True)
    win.showNormal()
    win.raise_()
    win.requestActivate()
def main():
    """Wire up the model chain, QML UI and global hotkey, then run Qt."""
    def woot(*args):
        # When new rows arrive, force the proxies to re-sort/re-filter.
        print("woot args", args)
        sortedModel.invalidate()
        shortenedModel.invalidateFilter()
    print("Canute startup", time.asctime())
    app = QApplication(sys.argv)
    setup_interrupt_handling()
    engine = QQmlApplicationEngine()
    context = engine.rootContext()
    # Model chain: SearchResults -> sorted by score -> first ten only.
    model = SearchResults()
    sortedModel = SortedSearchResults()
    sortedModel.setSourceModel(model)
    sortedModel.setSortRole(model.ScoreRole)
    sortedModel.sort(0, Qt.DescendingOrder)
    shortenedModel = ShortenedSearchResults()
    shortenedModel.setSourceModel(sortedModel)
    model.rowsInserted.connect(woot)
    context.setContextProperty("pyresults", shortenedModel)
    engine.load(os.path.join(os.path.split(__file__)[0], 'canute2.qml')) # must load once we've assigned the model
    # Global hotkey toggles the launcher window.
    SHORTCUT_SHOW = QKeySequence("Ctrl+Alt+M")
    show = Shorty(SHORTCUT_SHOW)
    win = engine.rootObjects()[0]
    show.activated.connect(lambda: reveal(win))
    show.enable()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 8,662 | py | 18 | canute2.py | 16 | 0.61891 | 0.611752 | 0 | 243 | 34.646091 | 114 |
RaulWhite/GolfilloBot | 5,162,550,707,003 | 3d4d973ce7013fcb4c7f70149129675bbf2d8cff | 5e9b16ce087c59d980931e36fbd609bfa422cec1 | /golfillobot/tokens.py | 05272601c76bb3fedc0e38655b843802a529301a | []
| no_license | https://github.com/RaulWhite/GolfilloBot | 51fbefb4aeed72762f4922bc09e5115b9a48032a | 49f41730478a6eea419d115d10f9116467697b47 | refs/heads/master | 2022-12-07T20:14:25.322630 | 2022-12-06T13:38:00 | 2022-12-06T13:38:00 | 169,161,852 | 5 | 2 | null | false | 2022-12-06T13:38:01 | 2019-02-04T22:47:06 | 2021-10-01T14:51:20 | 2022-12-06T13:38:00 | 35 | 5 | 0 | 0 | Python | false | false | import os
from dotenv import load_dotenv
def get_token():
    """Return the Telegram bot token from the environment.

    Loads a local .env file (python-dotenv) only when TEL_BOT_TOKEN is not
    already set in the process environment.
    """
    if "TEL_BOT_TOKEN" not in os.environ:
        load_dotenv()
    TEL_BOT_TOKEN = os.getenv("TEL_BOT_TOKEN")
return TEL_BOT_TOKEN | UTF-8 | Python | false | false | 195 | py | 10 | tokens.py | 5 | 0.661538 | 0.661538 | 0 | 9 | 20.777778 | 46 |
kidult00/NatureOfCode-Examples-Python | 5,961,414,618,277 | c5b1e1df57a570837234d2d8bf19e3f472e62607 | 5f0eeef355fa84b165d4e0707e8874755cc03259 | /chp04_systems/Exercise_4_12_ArrayofImages/Exercise_4_12_ArrayofImages.pyde | c224e70956815a150066d7482a28a77791c33f5b | []
| no_license | https://github.com/kidult00/NatureOfCode-Examples-Python | 5835fbed114f3991b9986852f31d29a0a46d7e53 | 42461590deebbe305d5815ff0d207ff974335ad5 | refs/heads/master | 2021-05-11T04:47:53.999705 | 2018-03-07T15:54:12 | 2018-03-07T15:54:12 | 117,946,895 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # The Nature of Code - Python Version
# [kidult00](https://github.com/kidult00)
# Array of Images for particle textures
from ParticleSystem import ParticleSystem
def setup():
    """Create the 640x360 P2D sketch and preload the five particle textures."""
    global imgs, ps
    size(640, 360, P2D)
    texture_names = ["corona.png", "emitter.png", "particle.png",
                     "texture.png", "reflection.png"]
    imgs = [loadImage(name) for name in texture_names]
    ps = ParticleSystem(imgs)
def draw():
    """Per-frame render: additive-blend the particle system and emit new
    particles at the mouse position."""
    background(0)
    blendMode(ADD)  # additive blending makes overlapping textures glow
    # Constant upward force applied to every particle each frame.
    up = PVector(0, -0.2)
    ps.applyForce(up)
    ps.run()
    # Emit a burst of five particles at the current mouse position.
    for i in range(5):
        ps.addParticle(mouseX, mouseY)
A2Media-id/spidery | 11,141,145,199,576 | 016a82323c82e497fe56c583cd546cd9acb136df | 731d3a0c960d441895b6b105bb4043eb41078821 | /src/spidery/spider/images/bing.py | 3e4667d7b1dfe48f8c783839a5e06f97913d846f | [
"MIT"
]
| permissive | https://github.com/A2Media-id/spidery | db81492a57347ad79fabd383c00a221e608755f7 | 48cf0f30fb85c176db952b111e329c8bf644f6b4 | refs/heads/master | 2023-06-06T17:49:09.653898 | 2021-07-11T10:48:09 | 2021-07-11T10:48:09 | 372,170,559 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import html
import json
import logging
import re
import traceback
from urllib.parse import quote_plus, unquote
from spidery.spider.images import ImageEngine
from spidery.spider.resource import DataImage
class Engine(ImageEngine):
"""The resource quality of this engine is good, but the search is a bit slow (there are too many invalid resources)"""
_regex = r"m=\"(\{\"\;cid[^\"]+\})\""
_limit = 25
_me = __file__
def _loop_search(self, term: str, page: int):
result = []
api = 'https://www.bing.com/images/async'
params = {'q': quote_plus(term), 'first': (page * self._limit + 1), 'count': self._limit,
'qft': '+filterui:imagesize-large', 'adlt': 'off',
'safeSearch': 'off'}
ret = self.get(url=api, params=params) # Both keywords and results must be gb2312 encoding
if not ret:
logging.error(f"Engine {self.me()} search failed: {term}")
return False, result
if not re.search(Engine._regex, ret.text, re.IGNORECASE | re.MULTILINE):
logging.error(f"Engine {self.me()} search failed: {term} server did not return results")
return False, result
items = re.findall(self._regex, ret.text)
if items:
items = set(map(html.unescape, items))
try:
while items and True:
try:
item = json.loads(items.pop())
dat = DataImage(engine=self.me())
for k, v in item.items():
value = unquote(v).strip() if (v and type(v) == str) else v
if k == 't':
dat.title = value
elif k == 'desc':
dat.desc = value
elif k == 'murl':
dat.url = value
elif k == 'purl':
dat.source = value
if not dat.url:
continue
elif dat.url and self.blacklisted_domain(dat.url):
continue
else:
result.append(dat)
except KeyboardInterrupt:
return
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
raise
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
now_page, total_page = (page, self._limit)
has_next = (int(now_page) < int(total_page) and len(result)) # Whether there is a next page
logging.info(f"Engine {__class__.__module__} is searching: {term} ({now_page}/{total_page})")
return has_next, result
if __name__ == '__main__':
eng = Engine()
for news in eng.search('Larissa Chou Gugat Cerai Alvin Faiz'):
print(news)
| UTF-8 | Python | false | false | 3,295 | py | 37 | bing.py | 36 | 0.484067 | 0.481942 | 0 | 73 | 43.136986 | 122 |
JayjeetAtGithub/spack | 17,686,675,337,324 | 7427ba052cad273cbfec2d5f378f9ee953f7174c | fb2cc597f319380d228fc15c4008760a82203687 | /lib/spack/spack/util/classes.py | cad2de2c48ea0e46bdcf2519427ac59eea6bd704 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
]
| permissive | https://github.com/JayjeetAtGithub/spack | c41b5debcbe139abb2eab626210505b7f930d637 | 6c2df00443a2cd092446c7d84431ae37e64e4296 | refs/heads/develop | 2023-03-21T02:35:58.391230 | 2022-10-08T22:57:45 | 2022-10-08T22:57:45 | 205,764,532 | 0 | 0 | MIT | true | 2019-09-02T02:44:48 | 2019-09-02T02:44:47 | 2019-07-12T08:48:51 | 2019-07-12T08:48:49 | 15 | 0 | 0 | 0 | null | false | false | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# Need this because of spack.util.string
from __future__ import absolute_import
import inspect
import llnl.util.tty as tty
from llnl.util.lang import list_modules, memoized
from spack.util.naming import mod_to_class
__all__ = ["list_classes"]
@memoized
def list_classes(parent_module, mod_path):
    """Given a parent path (e.g., spack.platforms or spack.analyzers),
    use list_modules to derive the module names, and then mod_to_class
    to derive class names. Import the classes and return them in a list
    """
    discovered = []
    for basename in list_modules(mod_path):
        mod_name = "%s.%s" % (parent_module, basename)
        class_name = mod_to_class(basename)
        # Import the submodule and pull the expected class out of it.
        module = __import__(mod_name, fromlist=[class_name])
        if not hasattr(module, class_name):
            tty.die("No class %s defined in %s" % (class_name, mod_name))
        candidate = getattr(module, class_name)
        if not inspect.isclass(candidate):
            tty.die("%s.%s is not a class" % (mod_name, class_name))
        discovered.append(candidate)
    return discovered
| UTF-8 | Python | false | false | 1,238 | py | 8,218 | classes.py | 6,591 | 0.66559 | 0.657512 | 0 | 39 | 30.74359 | 73 |
RuchitDoshi/LeetCode_Practice | 1,967,095,061,351 | abd9e4cc8410975987f5d72d2d1b51da44be6489 | 4f179fdd48108020f49064be6686abcaac69d1ef | /Easy/112_path_sum.py | a3c7bff18015002910820053f7147dc92f6e6c19 | []
| no_license | https://github.com/RuchitDoshi/LeetCode_Practice | b1e4fc64b9e8b5b60b1d4c115d7f1477b83fa6dc | 48dd00abc4b71e83b475ecdac23bc3ddbe55641e | refs/heads/master | 2023-03-04T07:45:28.978099 | 2021-02-14T04:46:14 | 2021-02-14T04:46:14 | 283,289,648 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def help(self,root,s,sum):
if root==None:
return False
s+=root.val
if root.left==None and root.right==None and s==sum:
return True
else:
return self.help(root.left,s,sum) or self.help(root.right,s,sum)
    def hasPathSum(self, root: TreeNode, sum: int) -> bool:
        """LeetCode 112: return True iff any root-to-leaf path sums to *sum*."""
        if root==None:
            return False
return self.help(root,0,sum) | UTF-8 | Python | false | false | 652 | py | 81 | 112_path_sum.py | 64 | 0.539877 | 0.53681 | 0 | 20 | 31.65 | 76 |
sampollard/CIS650 | 2,602,750,217,173 | 36b70ae69aa7f0e9b0fbafa70303d3b688a7e310 | a223a73d74158db52d7678c2be5e076a00ccd7a7 | /butler/start_forks.py | e2a3182747588b759cd49fa93d0547c326699763 | []
| no_license | https://github.com/sampollard/CIS650 | 971450aa857bc54be11c9fe99f282edbc6371ec3 | f8972db69e7debe65798a858adfba1ea19255358 | refs/heads/master | 2021-08-23T17:29:20.548330 | 2017-12-05T22:08:07 | 2017-12-05T22:08:07 | 107,166,954 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import sys

#print(len(sys.argv))
# Letters used to name the child fork.py processes (caps the child count at 9).
myList = ['a','b','c','d','e','f','g','h','i']

if len(sys.argv) < 2:
    print("Need to pass in a number as an arg")
    sys.exit(1)
try:
    # The int() conversion itself is the validation; the old
    # `isinstance(int(...), int)` check was always True.
    num_children = int(sys.argv[1])
except ValueError:
    print("Pass in a int")
    sys.exit(1)

processwait = []
for i in range(num_children):
    try:
        p = subprocess.Popen(['python', 'fork.py', str(myList[i])])
        processwait.append(p)
        print("started " + str(myList[i]))
    except EnvironmentError as e:
        # BUGFIX: `executable` was undefined here, so this path raised
        # NameError instead of reporting the failure.
        sys.exit('failed to start %r, reason: %s' % ('fork.py', e))

# Wait for every child to finish; Ctrl-C exits immediately instead.
# BUGFIX: the old `while len(processwait) > 1:` loop never terminated once
# the children exited (wait() returns instantly on a dead process), spinning
# at full CPU; a single wait pass matches the stated intent.
try:
    for each in processwait:
        each.wait()
except KeyboardInterrupt:
    sys.exit("interrupted")
| UTF-8 | Python | false | false | 928 | py | 20 | start_forks.py | 16 | 0.595905 | 0.588362 | 0 | 34 | 26.294118 | 78 |
SiddharthKumar02/chat-bridge | 13,194,139,554,972 | 4485f33a52e505ddbbb2867800642cc613ac0a33 | 1de1ba761c65af332706c499d5c3b8be45d464a2 | /src/app.py | b3ebfe0ef6ed23d7d80b8332bd4f3d4c56ab2a0b | []
| no_license | https://github.com/SiddharthKumar02/chat-bridge | 45f26120f6082b70e7856c47cff0d1306c0cbf66 | db985f800db9de234eea90d750cab4fb237ae120 | refs/heads/master | 2021-01-23T03:20:45.084126 | 2017-01-29T18:37:14 | 2017-01-29T18:37:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import wtl
import requests
import sys
import time
from pyee import EventEmitter
from ChatAdapters.RocketChatAdapter import RocketChatAdapter
from ChatAdapters.TelegramAdapter import TelegramAdapter
from ChatAdapters.IrcAdapter import IrcAdapter
try:
event_emitter = EventEmitter()
adapters = wtl.load_config(config_prefix="adapters",config_dir="/etc/chat-bridge/")
bridges = wtl.load_config(config_prefix="bridges",config_dir="/etc/chat-bridge/")
chat_adapters = {}
for adapter in adapters:
if adapters[adapter]['type'] == "rocketchat":
chat_adapters[adapter] = RocketChatAdapter(adapter, event_emitter, baseurl=adapters[adapter][
'baseurl'], username=adapters[adapter]['username'], password=adapters[adapter]['password'])
for room in chat_adapters[adapter].rocketchat.channels_list():
print(room['_id'])
if 'name' in room:
print(room['name'])
print("")
elif adapters[adapter]['type'] == "telegram":
chat_adapters[adapter] = TelegramAdapter(
adapter, event_emitter, adapters[adapter]['token'])
elif adapters[adapter]['type'] == "irc":
chat_adapters[adapter] = IrcAdapter(adapter, event_emitter, adapters[adapter]['server'], adapters[adapter]['port'], adapters[adapter]['nickname'])
else:
print("Adapter type {} not supported".format(
adapters[adapter]['type']))
sys.exit(1)
all_adapters_ok = True
for bridge in bridges:
print("Chack adapters for: {}".format(bridge))
for channel in bridges[bridge]:
adapter_ok = channel['adapter_name'] in adapters
adapter_ok = chat_adapters[channel['adapter_name']
].use_channel(channel['channel_id']) and adapter_ok
all_adapters_ok = all_adapters_ok and adapter_ok
print("> {} ({}): {}".format(
channel['adapter_name'], channel['channel_id'], adapter_ok))
print()
if not all_adapters_ok:
print("Some adapter is not supported, check your config")
sys.exit(1)
for adapter in chat_adapters:
chat_adapters[adapter].start()
@event_emitter.on('message')
def event_handler(message):
dests = []
print("From channel: {}".format(message['channel_id']))
print("From username: {}".format(message['from']))
print("Text: {}".format(message['text']))
for bridge_name in bridges:
from_label = None
send_to_this_bdirge = False
this_bridge_dests = []
for channel in bridges[bridge_name]:
if channel['channel_id'] == message['channel_id'] and channel['adapter_name'] == message['adapter_name']:
send_to_this_bdirge = True
from_label = channel['from_channel_label']
else:
this_bridge_dests.append(channel)
if send_to_this_bdirge:
print("Send using: {}".format(bridge_name))
if send_to_this_bdirge:
for dest_channel in this_bridge_dests:
print("Sending to {}".format(dest_channel['channel_id']))
chat_adapters[dest_channel['adapter_name']].send_msg(dest_channel['channel_id'], message[
'from'] + " (" + from_label + "): " + message['text'])
print()
print("Running...")
running = True
while running:
running = False
allRunning = True
for adapter in chat_adapters:
running = running or chat_adapters[adapter].isAlive()
allRunning = allRunning and chat_adapters[adapter].isAlive()
for adapter in chat_adapters:
if not allRunning and chat_adapters[adapter].isAlive():
chat_adapters[adapter].stop()
print("Waiting for '{}' to stop".format(adapter))
chat_adapters[adapter].join()
try:
time.sleep(1)
except KeyboardInterrupt as ki:
running = False
for adapter in chat_adapters:
if chat_adapters[adapter].isAlive():
chat_adapters[adapter].stop()
print("Waiting for '{}' to stop".format(adapter))
chat_adapters[adapter].join()
except Exception as e:
print(e)
sys.exit(0)
print("End")
| UTF-8 | Python | false | false | 4,493 | py | 7 | app.py | 5 | 0.578455 | 0.577343 | 0 | 113 | 38.761062 | 158 |
dgmp88/lighting-controls | 5,463,198,442,683 | 06aa79fbe26a65cda8122e786b97feaede1ddce5 | 1ab926dccb015cd5185fc4dcbd79f31fa8a71270 | /server/app.py | 8bb4773b7799c4e5dcb3c64163b2176a21df4b72 | []
| no_license | https://github.com/dgmp88/lighting-controls | dad93ea15749640177917ef4fe74c1fffdb5fe6b | 759292f81c627053c453ca1cb106a8e17a696bba | refs/heads/master | 2021-01-10T11:58:46.097532 | 2016-04-13T20:29:24 | 2016-04-13T20:29:24 | 45,944,398 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tornado.ioloop
import tornado.web
import os
import time
from handlers import ChangeValuesHandler, MainHandler
import pigpio
pgpio_obj = pigpio.pi() # connect to local Pi
red_pin = 2
green_pin = 24
blue_pin = 3
def start_tornado(app):
app.listen(80)
print "Starting Torando"
tornado.ioloop.IOLoop.instance().start()
print "Tornado finished"
def switch_off():
    """Drive all three colour channels to zero duty cycle (LEDs dark)."""
    for pin in (red_pin, green_pin, blue_pin):
        pgpio_obj.set_PWM_dutycycle(pin, 0)
def stop_tornado():
    """Release the GPIO connection and ask the Tornado IO loop to exit."""
    pgpio_obj.stop()  # disconnect from the pigpio daemon
    ioloop = tornado.ioloop.IOLoop.instance()
    # Schedule the stop on the loop itself rather than calling it directly.
    ioloop.add_callback(ioloop.stop)
    print "Asked Tornado to exit"
def make_app():
    """Assemble the Tornado application with shared handler state."""
    # State handed to every handler: current RGB value, the pigpio
    # connection, and the three GPIO pins.
    inputs = {'lighting_settings': {'rgb': [0, 0, 0]},
              'pgpio_obj': pgpio_obj,
              'pins': [red_pin, green_pin, blue_pin]}
    handlers = [
        (r"/", MainHandler, inputs),
        (r"/dochange/", ChangeValuesHandler, inputs),
    ]
    # Debug mode is opt-in via the DEBUG environment variable.
    DEBUG = os.environ.get("DEBUG") == "True"
    settings = {
        'static_path': os.path.join(os.path.dirname(__file__), 'static')
    }
    return tornado.web.Application(handlers, debug=DEBUG, **settings)
if __name__ == "__main__":
    app = make_app()
    start_tornado(app)  # blocks until the IO loop stops
    switch_off()  # make sure the LEDs are dark once the server exits
    try:
        while True:
            time.sleep(1)
            print 'hi'  # NOTE(review): debug heartbeat — presumably left in by accident
    except KeyboardInterrupt:
        stop_tornado()
| UTF-8 | Python | false | false | 1,420 | py | 11 | app.py | 6 | 0.617606 | 0.608451 | 0 | 60 | 22.666667 | 72 |
youngcyberop/PythonCrashCourse | 14,121,852,495,905 | ba33e3505a9827e78b708cd135452309be315789 | b4317fe96d7368e37e55724349f29e45d3294fa0 | /Chapter 3 - Introducing Lists/tiy_3_2_greetings.py | 11670d97015951b36c07238a7c42041553dde775 | []
| no_license | https://github.com/youngcyberop/PythonCrashCourse | 096844319205c8b37d1cf2fc9c2425c26818abe5 | e4274eedb98e5e85cc63b0bdf3021824553b2c0b | refs/heads/master | 2023-01-10T00:58:41.147704 | 2020-11-10T13:25:45 | 2020-11-10T13:25:45 | 230,657,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
3-2. Greetings: Start with the list you used in Exercise 3-1, but instead of
just
printing each person’s name, print a message to them. The text of each message
should be the same, but each message should be personalized with the
person’s name.
'''
# List of names from Exercise 3-1; each print personalises a message.
names = ['Erin', 'Dan', 'Sydney']
print(f"Hello, {names[0]} how are you this evening?")
print(f"What time is it, {names[1]}?")
print(f"Are you hungry, {names[2]}?")  # BUGFIX: added missing space after the comma
| UTF-8 | Python | false | false | 422 | py | 36 | tiy_3_2_greetings.py | 36 | 0.703349 | 0.686603 | 0 | 12 | 33.833333 | 78 |
EMBEDDIA/anchor-point-generation | 7,584,912,263,925 | 4b8c6ef0186de421289cf88d2065cc594bceda65 | 371f399f33a907b620aa3f9eaf97152e763edb62 | /other/lematizer.py | 73feaae0998a03f7b9c2a0c4e0806c1e78bc8a57 | [
"MIT"
]
| permissive | https://github.com/EMBEDDIA/anchor-point-generation | abaa41bc1c2e9698606856eacdcf26805d8e934c | f7841c7583407f78ab333169ccde5c84be7fd4f7 | refs/heads/main | 2023-02-16T12:05:28.556308 | 2020-12-01T09:38:32 | 2020-12-01T09:38:32 | 316,466,242 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # /media/luka/Portable Disk/Datasets/embeddings_alignment/hr-sl.txt/OpenSubtitles.hr-sl.hr.lemmatized
import argparse
import os
import stanfordnlp
def match_pars_opensubtitles(ali1):
    """Yield every line of the OpenSubtitles file *ali1*, stripped of
    surrounding whitespace (one paragraph per line)."""
    with open(ali1, 'r') as source:
        for raw_line in source:
            yield raw_line.strip()
def match_pars_iob(input):
    """Yield (sentence, rows) pairs from a CoNLL/IOB-style file.

    Each non-blank line is whitespace-split; column 0 is the 1-based token
    index and column 1 the token form.  A row whose index is '1' starts a
    new sentence.  Yields the space-joined tokens together with the raw
    split rows for each sentence.
    """
    with open(input, 'r') as leftpars:
        sentence = []
        sentence_raw = []
        for line in leftpars:
            if len(line) == 1:  # blank separator line (bare '\n')
                continue
            lpar = line.split()
            if lpar[0] == '1' and sentence:
                yield ' '.join(sentence), sentence_raw
                sentence = []
                sentence_raw = []
            sentence.append(lpar[1])
            sentence_raw.append(lpar)
        # BUGFIX: an empty input previously yielded ('', []); only emit the
        # trailing sentence when one was actually collected.
        if sentence:
            yield ' '.join(sentence), sentence_raw
def multipara_corpus_lemmatizer(matchingpars, nlp1, args, input):
    """Lemmatize each paragraph yielded by *matchingpars* with pipeline
    *nlp1*, appending to '<input>.lemmatized' / '<input>.unlemmatized'
    (and an IOB copy with the token column replaced by lemmas).

    Resumable: if '<input>.lemmatized' already exists, that many leading
    paragraphs of the generator are skipped so a crashed run can continue.
    """
    # Count previously written sentences to know how many inputs to skip.
    if os.path.exists(input + '.lemmatized'):
        sent_num = 0
        with open(input + '.lemmatized', 'r') as r:
            for _ in r:
                sent_num += 1
    else:
        sent_num = 0
    i = 0
    sentence_count_ok = False  # becomes True once the done prefix is skipped
    with open(input + '.unlemmatized', 'a') as w_u:
        with open(input + '.lemmatized', 'a') as w_l:
            for lpar in matchingpars:
                # Skip paragraphs already processed by a previous run.
                if i < sent_num and not sentence_count_ok:
                    i += 1
                    continue
                else:
                    sentence_count_ok = True
                # NOTE(review): lpar[0] is the sentence string for iob input
                # (a (sentence, rows) tuple), but for the openSubtitles
                # generator, which yields plain strings, it is the FIRST
                # CHARACTER — confirm this path is ever used together.
                doc1 = nlp1(lpar[0])
                # '_' stands in for missing lemmas / surface forms.
                lem1 = [word.lemma if word.lemma is not None else '_' for sent in doc1.sentences for word in sent.words]
                ulem1 = [word.text if word.text is not None else '_' for sent in doc1.sentences for word in sent.words]
                w_u.write(' '.join(ulem1) + '\n')
                w_l.write(' '.join(lem1) + '\n')
                # NOTE(review): use_prelemmatized is an argparse store_true
                # flag (False by default), so `is not None` is always True —
                # likely meant `if args.use_prelemmatized:`.
                if args.use_prelemmatized is not None:
                    with open(input + '.lemmatized_iob', 'a') as w_l_i:
                        assert len(lem1) == len(lpar[1]), 'Incorrect length - number of lemmas not equal to num of lines in input file.'
                        for line_i, line in enumerate(lpar[1]):
                            line[1] = lem1[line_i]  # replace token column with its lemma
                            w_l_i.write('\t'.join(line) + '\n')
                        w_l_i.write('\n')  # blank line separates sentences
                if sent_num % 10000 == 0:
                    print('%d sentences processed' % sent_num)
                sent_num += 1
def dict_lemmatizer(text, nlp1):
    """Run pipeline *nlp1* over *text* and return all lemmas joined by
    spaces; '_' substitutes for any missing lemma."""
    document = nlp1(text)
    lemmas = []
    for sentence in document.sentences:
        for word in sentence.words:
            lemmas.append('_' if word.lemma is None else word.lemma)
    return ' '.join(lemmas)
def read_part(path, nlp1):
    """Read tab-separated pairs from *path* and return them as a list of
    (first_column, second_column) tuples; blank lines are skipped.

    *nlp1* is currently unused; kept for interface compatibility.
    """
    res = []
    with open(path, 'r') as read_file:
        for line in read_file:
            if len(line) == 1:  # blank separator line (bare '\n')
                continue
            split = line.split('\t')
            res.append((split[0], split[1]))
    # BUGFIX: the result list was built and then silently discarded.
    return res
def main(args):
    """CLI entry point: lemmatize a corpus file/directory or a tab-separated
    bilingual dictionary using stanfordnlp pipelines."""
    parser = argparse.ArgumentParser(
        description='Find words from languages L1 and L2 in same context, output dictionaries and embeddings for those words.')
    parser.add_argument('--input', help='Path to document')
    parser.add_argument('--lang', help='Language of document')
    parser.add_argument('--lang2', default=None, help='Language of document')
    parser.add_argument('--nlpbatch', default=5000, type=int, help='Language of document')
    parser.add_argument('--format', default='openSubtitles', help='Format of processed input file')
    parser.add_argument('--dict_input', action='store_true', help='If format is not multipara_crawl_like')
    # parser.add_argument('--use_prelemmatized', default=None, help='Path to prelematized file not in iob_format')
    parser.add_argument('--use_prelemmatized', action='store_true', help='Whether to save lemmatized file in iob_format')
    args = parser.parse_args(args)
    os.environ["CUDA_VISIBLE_DEVICES"]="0"  # pin stanfordnlp to the first GPU
    processors = 'tokenize,lemma'
    # First attempt assumes the models are already downloaded; on failure
    # download them and retry.  NOTE(review): the bare `except:` also hides
    # unrelated errors — narrowing it is worth considering.
    try:
        nlp1 = stanfordnlp.Pipeline(lang=args.lang, processors=processors, lemma_batch_size=args.nlpbatch, tokenize_batch_size=args.nlpbatch, use_gpu=True, tokenize_pretokenized=True)
        if args.lang2 is not None:
            nlp2 = stanfordnlp.Pipeline(lang=args.lang2, processors=processors, lemma_batch_size=args.nlpbatch, tokenize_batch_size=args.nlpbatch, use_gpu=True, tokenize_pretokenized=True)
    except:
        stanfordnlp.download(args.lang)
        nlp1 = stanfordnlp.Pipeline(lang=args.lang, processors=processors, lemma_batch_size=args.nlpbatch, tokenize_batch_size=args.nlpbatch, use_gpu=True, tokenize_pretokenized=True)
        if args.lang2 is not None:
            nlp2 = stanfordnlp.Pipeline(lang=args.lang2, processors=processors, lemma_batch_size=args.nlpbatch, tokenize_batch_size=args.nlpbatch, use_gpu=True, tokenize_pretokenized=True)
    if args.dict_input:
        # Dictionary mode: each line is "source<TAB>target"; lemmatize both
        # sides and write them back tab-separated.
        with open(args.input, 'r') as read_file:
            with open(args.input + '.lemmatized', 'w') as write_file:
                for line in read_file:
                    split_line = line.split('\t')
                    l1 = dict_lemmatizer(split_line[0], nlp1)
                    l2 = dict_lemmatizer(split_line[1][:-1], nlp2)  # [:-1] drops the trailing newline
                    write_file.write(l1 + '\t' + l2 + '\n')
        # NOTE(review): this call lemmatizes the file *path* string and
        # discards the result — looks like leftover debugging code.
        dict_lemmatizer(args.input, nlp1)
    else:
        # Corpus mode: one file, or every not-yet-processed .tsv in a directory.
        if os.path.isdir(args.input):
            input_files = [os.path.join(args.input, filename) for filename in os.listdir(args.input) if filename.split('.')[-1] == 'tsv' and not os.path.exists(os.path.join(args.input, filename + '.lemmatized'))]
        else:
            input_files = [args.input]
        for input in input_files:  # NOTE: shadows the builtin `input`
            if args.format == 'openSubtitles':
                matchingpars = match_pars_opensubtitles(input)
            elif args.format == 'iob':
                matchingpars = match_pars_iob(input)
            else:
                raise Exception('Corpus format not supported!')
            multipara_corpus_lemmatizer(matchingpars, nlp1, args, input)
if __name__ == '__main__':
    # Script entry: forward the CLI arguments (minus the program name).
    import sys
    main(sys.argv[1:])
| UTF-8 | Python | false | false | 6,233 | py | 11 | lematizer.py | 10 | 0.589443 | 0.577571 | 0 | 137 | 44.49635 | 212 |
yshrimp/algorithm | 6,253,472,384,441 | e0ec3711fb386812708807ae5f1af0b7889533f5 | 8247976cc4a6fee5dd45544a12cb4a8eebd3e1ba | /backjoon/2588-multiply.py | 44617b17fbc2246e998ff38caa14dfec77403ea3 | []
| no_license | https://github.com/yshrimp/algorithm | 546e0d19ff13a074db3d75a42e8f86f4a3f0ac5c | 83cd73f0e32fe45515898ee82db0bda8e9e9286c | refs/heads/main | 2023-07-07T11:00:07.605565 | 2021-08-12T13:44:23 | 2021-08-12T13:44:23 | 350,624,513 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = int(input())
# Baekjoon 2588: multiply a (read above) by each digit of the 3-digit
# number b — ones, tens, hundreds — then print the full product.
b = input()
for i in reversed(range(3)):
    print(a*int(b[i]))  # i = 2, 1, 0 -> ones, tens, hundreds digit of b
print(a*int(b))
# This took longer than my first attempt. Why?  (translated from Korean)
| UTF-8 | Python | false | false | 163 | py | 35 | 2588-multiply.py | 34 | 0.582677 | 0.574803 | 0 | 9 | 13.111111 | 28 |
Deetss/AGooseNamedDeetzz | 3,040,836,888,452 | da7d1b126b1034d4d2c2f84c87936bd28b4af832 | c874f720a781ddf03314a79c28d788e38b3c5293 | /Deetzz.py | 2472c50e67b6a4647fa36ccb56919b2ee665e8f2 | []
| no_license | https://github.com/Deetss/AGooseNamedDeetzz | 397740d0402aa733cd1e6e62143eac41e9d66487 | d87a5487320da3ac1cc6991036c4a7386f4aaf18 | refs/heads/master | 2020-06-01T15:16:03.955942 | 2019-06-13T18:53:44 | 2019-06-13T18:53:44 | 190,830,318 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import time
from Util import *
from States import *
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
class Deetzz(BaseAgent):
    """RLBot agent: each tick it refreshes cached game data, picks a
    behaviour state, and returns that state's controller output."""

    def initialize_agent(self):
        # This runs once before the bot starts up.
        self.me = obj()
        self.ball = obj()
        self.start = time.time()
        self.steer_correction_radians: float = 0
        self.state = calcShot()
        self.controller = calcController

    def checkState(self):
        """When the active state has expired, switch to the first available
        state in priority order calcShot > quickShot > wait, falling back
        to quickShot."""
        if self.state.expired:
            if calcShot().available(self) == True:
                self.state = calcShot()
            elif quickShot().available(self) == True:
                self.state = quickShot()
            elif wait().available(self) == True:
                self.state = wait()
            else:
                self.state = quickShot()

    def get_output(self, game: GameTickPacket) -> SimpleControllerState:
        self.preprocess(game)  # Put some game data in easy to use variables
        self.checkState()  # Checks to see which state the bot needs to be set to
        return self.state.execute(self)

    def preprocess(self, game):
        """Copy the packet's physics data for our car, the ball and every
        other car into lightweight obj() wrappers for the states to use."""
        self.players = []
        car = game.game_cars[self.index]
        self.me.location.data = [car.physics.location.x, car.physics.location.y, car.physics.location.z]
        self.me.velocity.data = [car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z]
        self.me.rotation.data = [car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll]
        self.me.rvelocity.data = [car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z]
        self.me.matrix = rotator_to_matrix(self.me)
        self.me.boost = car.boost
        self.me.grounded = car.has_wheel_contact

        ball = game.game_ball
        self.ball.location.data = [ball.physics.location.x,ball.physics.location.y,ball.physics.location.z]
        self.ball.velocity.data = [ball.physics.velocity.x,ball.physics.velocity.y,ball.physics.velocity.z]
        self.ball.rotation.data = [ball.physics.rotation.pitch,ball.physics.rotation.yaw,ball.physics.rotation.roll]
        self.ball.rvelocity.data = [ball.physics.angular_velocity.x,ball.physics.angular_velocity.y,ball.physics.angular_velocity.z]
        self.ball.local_location = to_local(self.ball,self.me)
        self.boosts = game.game_boosts

        for i in range(game.num_cars):
            if i != self.index:
                car = game.game_cars[i]
                temp = obj()
                temp.index = i
                temp.team = car.team
                temp.location.data = [car.physics.location.x, car.physics.location.y, car.physics.location.z]
                temp.velocity.data = [car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z]
                temp.rotation.data = [car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll]
                temp.rvelocity.data = [car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z]
                # BUGFIX: was `self.me.boost = car.boost`, which clobbered our
                # own boost amount with each other car's value.
                temp.boost = car.boost
                # BUGFIX: self.players is rebuilt from scratch every tick, so
                # the old "update-if-present" scan never matched, and because
                # the append was gated on `if flag` no player was EVER stored.
                self.players.append(temp)
| UTF-8 | Python | false | false | 3,512 | py | 2 | Deetzz.py | 1 | 0.611902 | 0.611617 | 0 | 78 | 44.025641 | 136 |
tamamushi/BlackMamba | 8,504,035,257,701 | cd23ac7bbe1ad9828683f7abc1301838c4f6f2e3 | b29308a1e990e5dbe6903156bf0a56d9a7033f94 | /src/models.py | 50d23b98779f2d8ac43408c4c1f25e9818b24d3f | []
| no_license | https://github.com/tamamushi/BlackMamba | 0ab1c2ce6dc8d286751619bdb6ecddfd0f82b88d | d3ceb65656c8a4ad4f447f469be16709bfe5845b | refs/heads/master | 2021-01-02T22:30:59.177164 | 2015-10-24T14:31:51 | 2015-10-24T14:31:51 | 42,781,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/env python
# -*- coding: utf-8 -*-
# vim:set ts=4 fenc=utf-8:
from webapp import db
from werkzeug import generate_password_hash, check_password_hash
from sqlalchemy.orm import synonym
from sqlalchemy import Column, Integer, String, Unicode, UnicodeText, ForeignKey
class User(db.Model):
    """Account record storing a username and a salted password hash.

    The plaintext password is never persisted: assigning to ``password``
    hashes it via werkzeug, and ``check_password`` verifies candidates.
    The is_* / get_id methods implement the Flask-Login user protocol.
    """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    uname = Column(String(18), unique=True, nullable=False)
    _password = Column('password', String(100), nullable=False);

    def _get_password(self):
        # Raw hash exactly as stored in the DB column.
        return self._password

    def _set_password(self, password):
        if password:
            password = password.strip()
        # Store only the werkzeug hash, never the plaintext.
        self._password = generate_password_hash(password)

    # Expose the column as `.password` while hashing on assignment; synonym
    # keeps the SQLAlchemy column and the Python property in sync.
    password_descriptor = property(_get_password, _set_password)
    password = synonym('_password', descriptor=password_descriptor)

    def check_password(self, password):
        """Return True iff *password* (stripped, non-empty) matches the hash."""
        password = password.strip()
        if not password:
            return False
        return check_password_hash(self.password, password)

    def is_authenticated(self):
        # Flask-Login protocol: any persisted user counts as authenticated.
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Flask-Login requires a unicode id (this is a Python 2 codebase).
        return unicode(self.id)

    @classmethod
    def authenticate(cls, query, uname, password):
        """Look up *uname* via *query* (presumably a Session.query callable —
        confirm at call sites) and return (user_or_None, password_ok)."""
        user = query(cls).filter(cls.uname == uname).first()
        if user is None:
            return None, False
        return user, user.check_password(password)

    def __repr__(self):
        return u'<User id={self.id} uname={self.uname!r}>'.format(self=self)
def init():
    """Create all database tables for the models registered on this db."""
    db.create_all()
| UTF-8 | Python | false | false | 1,452 | py | 28 | models.py | 16 | 0.716942 | 0.711433 | 0 | 57 | 24.473684 | 80 |
j3ffyang/ai | 15,350,213,161,522 | be58ca39ffabf862aef34dc744d063e11f688c66 | 714e36b745a5b2b5fc4e9b267b3fa214a9fa3d9a | /scripts/basic/turtlecircle.py | 6ffef1def6b34b81e7c175b88a29533451c9aa3e | []
| no_license | https://github.com/j3ffyang/ai | e89b4618c96e2085f37047c88d95f89d0a5409c9 | 5da753d2a1c9793564a32ac80911c1d2e35e8605 | refs/heads/master | 2022-12-10T21:12:48.432682 | 2020-08-12T07:56:11 | 2020-08-12T07:56:11 | 141,972,057 | 2 | 1 | null | false | 2022-11-22T02:55:29 | 2018-07-23T06:37:15 | 2020-08-12T07:56:21 | 2022-11-22T02:55:26 | 7,604 | 2 | 1 | 9 | Python | false | false | # -*- coding: utf-8 -*-
# Draw concentric rings of circles with turtle graphics on a black canvas.
import turtle as t
import random  # only used if the "random colors/widths" lines are uncommented

t.setup(width=600, height=600)
t.speed(500)  # values above 10 select the fastest drawing speed
t.bgcolor('black')
t.color('lime') #comment out for random colors
d = 120  # circle radius; shrinks with each outer ring
colors = ["maroon","olive","blue","orange","purple","coral","khaki"]  # palette for the random-colour option
for i in range(140,0,-20):  # i = distance travelled before drawing each circle
    d = d-20
    for j in range(0,380,10):  # j = cumulative rotation, one circle per step
        #color = random.choice(colors) #uncomment for random colors
        #t.color(color) #uncomment for random colors
        #rndw = random.randint(2,5) #uncomment for random widths
        #t.width(rndw) #uncomment for random widths
        t.pu()
        t.fd(i)
        t.pd()
        t.circle(d)
        t.pu()
        t.home()  # return to centre before the next rotation
        t.rt(j)
t.ht()  # hide the turtle cursor when finished
t.exitonclick()
AsadullahGalib007/Python-Playground | 5,781,026,015,334 | a9efa6ce0730f24866c2a5c573174934eb32c488 | 4116a4e4d63de4472c406bfbaf7c7fbd13580338 | /6. Networking/assignment.py | 621c58031e570814b5476c565d95b2406512f35e | []
| no_license | https://github.com/AsadullahGalib007/Python-Playground | 7444978727efac10546fc464474d98527fc0210d | 62f39f1d40f5ce55da3b74ffdbcf4b56864c32d0 | refs/heads/master | 2023-01-02T17:47:45.534005 | 2020-10-07T14:06:23 | 2020-10-07T14:06:23 | 280,812,632 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import re

# py4e scraping assignment: fetch a page and sum every number found
# inside its <span> tags.
url = input('Enter a URL: ')
if len(url) < 1: url = 'http://py4e-data.dr-chuck.net/comments_815754.html'  # default assignment URL
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
# print(soup)
total = 0
lines = soup('span')  # soup(tag) is shorthand for find_all(tag)
for line in lines:
    line = line.decode()  # the tag rendered back to an HTML string
    x = re.findall('[0-9]+',line)
    x = int(x[0])  # assumes every span contains at least one number — TODO confirm
    total = total + x
print(total)
SwipeMe/SwipeMe | 11,355,893,537,051 | 7324488c16797e07b15ba514472cef4b2d9add0c | a2d870251ccca0d3ac0268f7b1d425714b3eff75 | /controllers/sms_mocker_page.py | 44347c8ad02319f8bd2bc9975af34c0deb4f1646 | []
| no_license | https://github.com/SwipeMe/SwipeMe | e6bcef9877d4dcf10768da10f609927440aa533b | 59f123351841b2c4a3dbba1a87007eda19e1f558 | refs/heads/master | 2016-03-17T17:08:43.648970 | 2014-12-17T19:01:12 | 2014-12-17T19:01:12 | 23,877,792 | 0 | 1 | null | false | 2014-12-09T04:25:01 | 2014-09-10T14:35:18 | 2014-12-03T23:56:57 | 2014-12-09T04:25:01 | 2,664 | 1 | 1 | 2 | Python | null | null | from base_handler import *
from models.customer import Customer
from models.mock_data import MockData
import logging
class SMSMockerPage(BaseHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template("sms.html")
self.response.write(template.render())
#'this' instead of 'self' to avoid name conflict within hook()/unhook()
# the hook can call the containing method's self
#
#Expected payload: 'command' <'on' or 'off'>
def post(this):
logging.info("Trying to set the hook.")
data = json.loads(this.request.body)
command = data['command']
logging.info(str(Customer.buyer))
monkey = None
if command == 'on':
logging.info("Setting hook")
def hook(self,body):
if self.customer_type == self.buyer:
MockData.receive_SMS(msg=body,customer_type='buyer')
elif self.customer_type == self.seller:
MockData.receive_SMS(msg=body,customer_type='seller')
monkey = hook
elif command == 'off':
def unhook(self,body):
if body:
taskqueue.add(url='/q/sms', params={'to': self.phone_number, 'body': body})
monkey = unhook
if monkey:
Customer.send_message = monkey
| UTF-8 | Python | false | false | 1,349 | py | 34 | sms_mocker_page.py | 25 | 0.581171 | 0.581171 | 0 | 38 | 34.473684 | 95 |
rafagonc/ReadingList-Server | 9,431,748,206,143 | e6c2796852faf10ed86ee3fa303ce7ef1e238d10 | 756e7121df4b454db7cd4b152e975fb48d5dec90 | /models/user_books.py | 40d9e26d2514618aad27b5c3122ce995e109c4db | []
| no_license | https://github.com/rafagonc/ReadingList-Server | d1786283051a23ec4fa2b30e1f04f62571239e97 | 1dcf52e21fbf663054dbc308a08158d7b4808ad0 | refs/heads/master | 2021-05-24T04:32:13.657515 | 2016-10-07T19:44:51 | 2016-10-07T19:44:51 | 49,616,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from db import db
from sqlalchemy import Column, String, Integer, ForeignKey, Float, Boolean
from sqlalchemy.orm import relationship
from models.book import Book
from sqlalchemy.ext.hybrid import hybrid_property
from models.note import Note
class UserBooks(db.Model):
    """Association row linking a user to a book, plus per-user reading state.

    Tracks reading progress (pages / pages_read), the user's rating, a
    'loved' flag, a snippet and cover override, and the user's notes.
    """
    __tablename__ = "red_user_books"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("red_user.id"))
    book_id = Column(Integer, ForeignKey("red_book.id"))
    pages = Column(Integer)
    pages_read = Column(Integer)
    rate = Column(Float)
    loved = Column(Boolean)
    snippet = Column(String)
    cover_url = Column(String)
    # lazy='dynamic' keeps .notes as a query object; notes_list materialises it.
    notes = relationship("Note", backref='user_book', lazy='dynamic')

    def __init__(self, user, book, pages_read=0, pages=0, rate=0, snippet=""):
        # loved and cover_url are not initialised here.
        self.user_id = user.id
        self.book_id = book.id
        self.pages = pages
        self.pages_read = pages_read
        self.rate = rate
        self.snippet = snippet

    @hybrid_property
    def book(self):
        """The Book this row points at (issues an extra query per access)."""
        return Book.query.filter(Book.id == self.book_id).first()

    @hybrid_property
    def notes_list(self):
        """The dynamic notes relationship materialised as a plain list."""
        return self.notes.all()

    def add_notes(self, notes_dicts):
        """Upsert notes from dicts of {'text': ...} or {'id': ..., 'text': ...}.

        A dict carrying an 'id' updates the matching existing note when one
        is found, otherwise a new Note is created.  Commits once at the end.
        NOTE(review): dict.has_key and len(filter(...)) are Python 2 only.
        """
        for note_dict in notes_dicts:
            note = None
            if note_dict.has_key('id'):
                notes = filter(lambda x: x.id == note_dict['id'], self.notes)
                if len(notes) > 0:
                    note = notes[0]
                else:
                    note = Note(self, note_dict['text'])
                    self.notes.append(note)
                    db.session.add(note)
            else:
                note = Note(self, note_dict['text'])
                self.notes.append(note)
                db.session.add(note)
            # Whether found or freshly created, sync the text.
            note.text = note_dict['text']
        db.session.commit()
| UTF-8 | Python | false | false | 1,836 | py | 66 | user_books.py | 64 | 0.578976 | 0.576253 | 0 | 56 | 31.785714 | 78 |
nadirhamid/binner | 2,911,987,868,376 | a79f4edd58fa3c505372307664dc779b80eef1da | 9be51722d94917a13c1aedf74d29389b39625642 | /binner/binner_main.py | 3341a1fee45aaba8a8a94acd18e6f09fb74e9398 | [
"MIT"
]
| permissive | https://github.com/nadirhamid/binner | ac7e3f6f32bef7bf98cf3222c602a5cebb802473 | 292236e6bd4668aa904c2cd9a00cacc792984603 | refs/heads/master | 2020-04-12T09:32:27.517896 | 2018-01-20T01:14:40 | 2018-01-20T01:14:40 | 27,346,355 | 29 | 10 | MIT | false | 2018-06-29T05:40:10 | 2014-11-30T18:56:34 | 2018-06-28T22:02:25 | 2018-06-29T05:39:48 | 2,038 | 17 | 9 | 0 | Python | false | null |
import json
"""
Base of Binner takes a set of
arguments as bin sizes, items. Items
need to have the following traits
This object should be used for output
of stats: including (for each bin):
Packed Items:
Space Taken:
Weight Taken:
The Packing Time:
"""
class Binner(object):
    """Holds the outcome of a bin-packing run and renders it for output.

    NOTE(review): lost_items / lost_bins / packed_bins / smallest are
    class-level mutable attributes, so they are shared by every instance
    until an instance shadows them (set_smallest shadows `smallest`).
    """
    lost_items = []
    lost_bins = []
    packed_bins = []
    smallest = {} ## only available when algorithm find_smallest is ran
    def __init__(self, args, id, bins, items ):
        # args: parsed options (show() reads args.algorithm).
        # bins / items: collections produced by the packing run; both appear
        # to be wrappers exposing a dict via .items (see the iterations
        # below) -- confirm against the caller.
        self.args = args
        self.id = id
        self.bins = bins
        self.items = items
    """
    add a bin
    @param bin
    """
    def add_bin(self, bin_):
        # Placeholder -- not implemented.
        pass
    """
    add an item we couldnt
    find a measurement for
    @param: item
    """
    def add_lost(self, item):
        # Placeholder -- not implemented.
        pass
    """
    get all the packed bins
    in json ready form
    """
    def get_packed_bins(self):
        # NOTE(review): iteritems() is Python 2 only.
        bins = []
        for bin_key,bin in self.bins.items.iteritems():
            if bin.used:
                bins.append(bin.to_dict())
        return bins
    """
    sets the smallest bin having
    all the items per the allocation
    """
    def set_smallest(self, bin):
        self.smallest = bin
    """
    get the smallest bin out of a
    set of bins
    """
    def get_smallest(self):
        return self.smallest
    """
    show the output
    having finished the
    algorithm
    """
    def show(self):
        from . import log
        # 'smallest' runs report just the winning bin; every other algorithm
        # reports the unpacked (lost) items plus all used bins.
        if self.args.algorithm == "smallest":
            smallest = self.get_smallest()
            if smallest:
                result =dict(smallest=self.get_smallest().to_dict())
            else:
                result = dict(smallest=False)
        else:
            lost_items = []
            for k, item in self.items.items.iteritems():
                if not item.used:
                    lost_items.append( item.to_dict() )
            log.debug("Result for PACKED items")
            result = dict(lost=lost_items,
                 run=dict(id=self.id),
                  packed=self.get_packed_bins())
        log.debug( result )
        return result
| UTF-8 | Python | false | false | 1,855 | py | 30 | binner_main.py | 28 | 0.613477 | 0.613477 | 0 | 97 | 18.092784 | 69 |
RemiGascou/security-assessment-iot | 17,008,070,511,495 | e929f3373adb37de01abef7cf63a6066aee47de6 | ed91ef3f193c79e4d3bba8fc2b788aebee37b611 | /old_test/apps/desktop_app/lib/ui/__init__.py | e77b4954d734add47df330a3fed34e9d2495a9db | []
| no_license | https://github.com/RemiGascou/security-assessment-iot | 3b32b66624d103e7b6df0c74b2af5ed464498261 | a579e5646d60a98a3925c8e0122bff4f87ad3d2a | refs/heads/master | 2020-04-09T02:32:37.577656 | 2019-05-31T13:09:22 | 2019-05-31T13:09:22 | 159,944,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from lib.ui.widgets import *
from lib.ui.windows import *
| UTF-8 | Python | false | false | 83 | py | 82 | __init__.py | 44 | 0.650602 | 0.638554 | 0 | 4 | 19.75 | 28 |
MatusMaruna/Posenet-Squats-Evaluator | 17,686,675,334,328 | a7f22882840eddd7859bc350d8ba18d593c40115 | 47b09f01e94d513611723a538f713eaf1ac79db5 | /kinect/estimator.py | abd0473f71012476279f7c598a8ecc48ae58a934 | [
"Apache-2.0"
]
| permissive | https://github.com/MatusMaruna/Posenet-Squats-Evaluator | 4b0d619d7630a4829b2548d2fc663f12735383ac | 6acddd6982ba43fcfacd54e3f99ef56ef35073da | refs/heads/master | 2023-05-31T11:59:37.546399 | 2021-07-11T23:52:01 | 2021-07-11T23:52:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
from kinect.model import KinectModel
class KinectEstimator:
    """Predicts per-joint depth (z) from 2D Kinect joint positions.

    Wraps a trained KinectModel plus the pickled feature/label scalers, and
    can lift flat (x, y) joint vectors into flat (x, y, z) records.
    """

    def __init__(self, kinect_config, model_path=None):
        self._config = kinect_config
        self._model_path = model_path or self._config.model.path
        # Build and compile the network, then restore the trained weights
        # inside a fresh TF session.
        self._ml_model = KinectModel(self._config)
        self._ml_model.compile()
        self._session = tf.Session()
        self._session.run(tf.global_variables_initializer())
        self._ml_model.load(self._model_path)
        # Scalers fitted at training time; applied to inputs / outputs.
        self._feature_processor = self._load_feature_processor()
        self._label_processor = self._load_label_processor()
        # Column names for the 3D DataFrame: every joint gets _x, _y, _z.
        joints = ('head', 'left_shoulder', 'left_elbow', 'right_shoulder',
                  'right_elbow', 'left_hand', 'right_hand', 'left_hip',
                  'right_hip', 'left_knee', 'right_knee', 'left_foot',
                  'right_foot')
        self._columns = ['{}_{}'.format(joint, axis)
                         for joint in joints for axis in 'xyz']

    def predict(self, input_features):
        """Return the z-predictions (in original units) for scaled inputs."""
        scaled = self._feature_processor.transform(input_features)
        raw = self._ml_model.predict(scaled)
        return self._label_processor.inverse_transform(raw)

    def predict3d(self, input_features, return_df=False):
        """Predict depths and weave them into full 3D records.

        Returns a pandas DataFrame when return_df is true, otherwise a
        numpy array.
        """
        depths = self.predict(input_features)
        records = [self.get_3d_record(xy_row, z_row)
                   for xy_row, z_row in zip(input_features, depths)]
        if return_df:
            return pd.DataFrame(data=records, columns=self._columns)
        return np.array(records)

    def get_3d_record(self, x_y_features, z_features):
        """Interleave one row of depths into its flat (x, y) sequence.

        Joint i contributes x_y_features[2i:2i+2] followed by z_features[i].
        """
        flat = []
        for joint_index, depth in enumerate(z_features):
            offset = joint_index * 2
            flat.extend(list(x_y_features[offset:offset + 2]))
            flat.append(depth)
        return flat

    def _load_feature_processor(self):
        """Unpickle the input-feature scaler from the configured path."""
        with open(self._config.model.feature_processor_path, "rb") as fh:
            return pickle.load(fh)

    def _load_label_processor(self):
        """Unpickle the label scaler from the configured path."""
        with open(self._config.model.label_processor_path, "rb") as fh:
            return pickle.load(fh)
| UTF-8 | Python | false | false | 2,826 | py | 31 | estimator.py | 26 | 0.588464 | 0.582095 | 0 | 62 | 44.580645 | 117 |
caoziyao/lang | 16,509,854,306,430 | 7e65a16016f04e9efd606c14e7f4b76f98da050c | 344e535ccbd9109618b959f8c96a799a3d6fe558 | /main.py | 2b624ecdf72086f01e351cb6ac07fe37ef41a047 | []
| no_license | https://github.com/caoziyao/lang | 2349eb618df848fe6e3451d03779bac30f7ad7c0 | 5f24233e49509f1f943a2398a49d99b20e06d757 | refs/heads/master | 2021-10-22T08:01:24.997764 | 2019-03-09T04:02:11 | 2019-03-09T04:02:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from compiler.parser_descent.lexical_analysis import lexical_analysis
from compiler.parser_descent.syntax_analysis import syntax_analysis
from compiler.backend.ir.three_address import IRTree
from compiler.backend.gen.gen_register_vm import CodeRen
from vm.vm_register import VM
def exper_test():
    """Tokenize a simple assignment expression and print the token stream."""
    source = 'a = 1 + 2 * 4'
    tokens = lexical_analysis(source)
    print('token_list', tokens)
def for_test():
    """Parse a small for-loop program and return its syntax tree."""
    source = 'for ( a = 1; a < 19; a = a + 1) { b = a * 2 }'
    tree = syntax_analysis(lexical_analysis(source))
    print('expr', tree)
    return tree
def if_test():
    """Parse an if/else program and return its syntax tree."""
    source = 'if ( 9 < 4) { b = 3 } else { b = 2 }'
    tree = syntax_analysis(lexical_analysis(source))
    print('expr', tree)
    return tree
def arr_test():
    """Tokenize and parse an array-literal assignment, printing both stages."""
    source = 'a = [1, 3, 14]'
    tokens = lexical_analysis(source)
    print('t2', tokens)
    tree = syntax_analysis(tokens)
    print('expr', tree)
def main():
    """Entry point: run the array-literal parsing demo.

    The later pipeline stages (IR generation, code generation, VM run)
    are not wired into the active path.
    """
    arr_test()
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
#
#
# def mtest():
# while True:
# try:
# s = input('cal > ')
# except EOFError:
# break
#
#
#
# def main():
# """
# var b = 134;
# print b;
# if (b > 10)
# {
# print "1"
# print "2"
# }
# else
# {
# print "3"
# };
#
# var a = 1;
# while ( a < 3 )
# {
# print "4"
# a = a + 1
# }
# :return:
# var a = 2;
#
# def (a) {
# print a
# }
# """
# codes = """
#
# def abc ( ) {
# print "hello"
# };
#
# call abc;
# call abc;
# """
# lines = codes.split(';')
#
# asms = []
# asms_func = []
# for l in lines:
# code = l.strip()
# if code == '':
# continue
#
# asm, kind = compiler(code)
# if kind == Kind.kdef:
# asms_func.extend(asm)
# else:
# asms.extend(asm)
#
# asms = asms_func + ['start:'] + asms
# print('asm', asms)
# vm = VM(asms)
# vm.run()
#
#
# def compiler(code):
# lexer.input(code)
# for t in lexer:
# # print(t)
# pass
#
# # 语法树
# root = parser.parse(code)
#
# # s = Sematic(root)
# # s.sem()
#
# # 中间代码
# tree = IRTree(root)
# ir = tree.gen()
#
# # 代码生成
# g = CodeRen(ir)
# asm = g.gen()
#
# return asm, root.type
#
#
# def main():
# with open('c.lan', 'r') as f:
# s = f.read()
# lines = s.split(';')
# asms, func = [], []
#
# for l in lines:
# code = l.strip()
# if code == '':
# continue
#
# asm, kind = compiler(code)
# if kind == Kind.kdef:
# func.extend(asm)
# else:
# asms.extend(asm)
#
# asms = func + ['start:'] + asms
# vm = VM(asms)
# vm.run()
# if __name__ == '__main__':
# main()
| UTF-8 | Python | false | false | 3,596 | py | 64 | main.py | 56 | 0.409905 | 0.392837 | 0 | 197 | 17.142132 | 69 |
SeaWaterr/django- | 1,434,519,083,782 | 192e362f804e69c86e9932d7e96d16229a5dbf5e | db0620f594bca36f3215a293276d2b7d22590352 | /goods/models.py | 6027ee7f8c154bac800cbcac96c6bafef843bdc2 | []
| no_license | https://github.com/SeaWaterr/django- | 733aeb589360d21a8362ce82c403df3ffcdebe3e | fbb5694d393738972c4ee4c0a4d1e10eb0fef0a9 | refs/heads/master | 2020-04-09T21:35:20.878137 | 2018-12-06T02:42:14 | 2018-12-06T02:42:14 | 160,606,706 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class Food(models.Model):
    """A food product with price, stock figure and sourcing links."""
    name = models.CharField(max_length=30)
    price = models.IntegerField()
    sum = models.IntegerField()  # presumably quantity in stock -- confirm with callers
    id = models.CharField(max_length=30, primary_key=True)
    # one-to-many
    warehouse = models.ForeignKey('Warehouse', on_delete=models.CASCADE)
    production = models.ForeignKey('Production', on_delete=models.CASCADE)
    class1 = models.ForeignKey('Class1', on_delete=models.CASCADE)
    # many-to-many
    ingre = models.ManyToManyField('Ingre')
class Drink(models.Model):
    """A drink product with price, stock figure and sourcing links."""
    name = models.CharField(max_length=30)
    price = models.IntegerField()
    sum = models.IntegerField()  # presumably quantity in stock -- confirm with callers
    id = models.CharField(max_length=30, primary_key=True)
    # one-to-many
    warehouse = models.ForeignKey('Warehouse', on_delete=models.CASCADE)
    production = models.ForeignKey('Production', on_delete=models.CASCADE)
    class1 = models.ForeignKey('Class1', on_delete=models.CASCADE)
    # many-to-many
    ingre = models.ManyToManyField('Ingre')
class Electrical(models.Model):
    """An electrical product with price, stock figure and sourcing links."""
    name = models.CharField(max_length=30)
    price = models.IntegerField()
    sum = models.IntegerField()  # presumably quantity in stock -- confirm with callers
    id = models.CharField(max_length=30, primary_key=True)
    # one-to-many
    warehouse = models.ForeignKey('Warehouse', on_delete=models.CASCADE)
    production = models.ForeignKey('Production', on_delete=models.CASCADE)
    class1 = models.ForeignKey('Class1', on_delete=models.CASCADE)
    # many-to-many
    ingre = models.ManyToManyField('Ingre')
class Production(models.Model):
    """A producer/manufacturer: name, address and owner ('boss')."""
    name = models.CharField(max_length=30)
    addr = models.CharField(max_length=30)
    boss = models.CharField(max_length=30)
    id = models.CharField(max_length=30, primary_key=True)
class Ingre(models.Model):
    """An ingredient, keyed by its name."""
    name = models.CharField(max_length=30, primary_key=True)
    color = models.CharField(max_length=30)
class Class1(models.Model):
    """A product category record (borderid/typeid -- semantics unclear from here)."""
    borderid = models.CharField(max_length=30, primary_key=True)
    typeid = models.CharField(max_length=30)
class Warehouse(models.Model):
    """A warehouse record with two tracked dates."""
    name_id = models.CharField(max_length=30)
    id = models.CharField(max_length=30, primary_key=True)
    # presumably inbound/outbound dates -- confirm against callers
    in_time = models.DateField()
    no_time = models.DateField()
| UTF-8 | Python | false | false | 2,227 | py | 11 | models.py | 4 | 0.70744 | 0.689639 | 0 | 68 | 31.220588 | 74 |
zoemrob/beetle-etl | 4,252,017,648,189 | c7912e270f93871e1d0f2ca715f403b2964ce50b | 04186337c84af9885400d77d63ee6ba3bd7f21db | /python-src/Beetle_Src/BeetleETL/__init__.py | c0b71a3b58a5b2e272ac7efaa703d7a60ab83d92 | [
"Apache-2.0"
]
| permissive | https://github.com/zoemrob/beetle-etl | bd13212690fcb61c6ce50a05dfd3e73153356d9a | c4adc723c9f9c2d4b8b05a2b234d21cc3a4bc4ea | refs/heads/master | 2020-03-30T03:28:14.205492 | 2018-09-28T05:52:48 | 2018-09-28T05:52:48 | 150,690,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# version should match the version in the setup
__version__ = '1.0.5' | UTF-8 | Python | false | false | 70 | py | 33 | __init__.py | 18 | 0.685714 | 0.642857 | 0 | 2 | 34 | 47 |
manbygod/kt-ai-pair-programming-web-crawling | 1,013,612,282,636 | 4af096c468bac4be228845f3b41fcf67247eccaf | cd10dc5b1869532249220ed1c9c80b1368571c85 | /tools/download.py | 21e15ce041941a59ac183ec4d6b526addf5d7452 | []
| no_license | https://github.com/manbygod/kt-ai-pair-programming-web-crawling | 7a84202562f413ae6d359ed53b1cc767162e4cba | a5d52f2338bf6e900a32288afbf332c7de3f680e | refs/heads/master | 2022-07-27T14:00:44.474654 | 2020-05-18T08:30:17 | 2020-05-18T08:30:17 | 264,843,036 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import requests
def downloadImg(keyword, binary_list, link_list):
    """Persist scraped images under images/download_google/<keyword>/.

    binary_list entries are raw image bytes saved as <i>.jpg; link_list
    entries are fetched over HTTP and saved as <i>-1.jpg so the two
    sources never collide on a filename.
    """
    target_dir = f'images/download_google/{keyword}'
    os.makedirs(target_dir, exist_ok=True)
    # Already-downloaded payloads: write straight to disk.
    for index, payload in enumerate(binary_list):
        with open(f'{target_dir}/{index}.jpg', 'wb') as out:
            out.write(payload)
    # Remaining images are still URLs: fetch each one, then save it.
    for index, image_url in enumerate(link_list):
        response = requests.get(image_url)
        with open(f'{target_dir}/{index}-1.jpg', 'wb') as out:
            out.write(response.content)
tilman/compositional_elements | 11,321,533,792,961 | 6ab4a3dc3265352d46ce58a3153e68e268dd0bc7 | 3efc06b97806402f82943a28894c90b914b7c70e | /compoelem/detect/openpose_wrapper.py | f4157fbbe6301964ab0dc2a49ae803eee6dc1a70 | [
"MIT"
]
| permissive | https://github.com/tilman/compositional_elements | 3c58164c1663abea1934405271ebf3892bdf9e1d | 45271196ed01d0515357c7abdf35d6b87f2036d5 | refs/heads/master | 2023-05-28T06:28:37.043250 | 2021-06-11T12:18:20 | 2021-06-11T12:18:20 | 320,545,826 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from .openpose.lib.utils.common import Human
from typing import Sequence
import torch
import torch.nn as nn
from . import converter
from .openpose.lib.network.rtpose_vgg import get_model
from .openpose.evaluate.coco_eval import get_outputs
from .openpose.lib.utils.paf_to_pose import paf_to_pose_cpp
from .openpose.lib.config import cfg, update_config
# Command-line configuration for the OpenPose model; 'opts' captures any
# trailing tokens (argparse.REMAINDER) and is handed to update_config.
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
                    default='./compoelem/detect/openpose/experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
                    default='./compoelem/detect/openpose/pose_model.pth')
parser.add_argument('opts',
                    help="Modify config options using the command-line",
                    default=None,
                    nargs=argparse.REMAINDER)
args = parser.parse_args()
# args = {
#     "cfg":'./compoelem/detect/openpose/experiments/vgg19_368x368_sgd.yaml',
#     "opts":[],
#     "weight":'./compoelem/detect/openpose/pose_model.pth',
# }
# update config file
update_config(cfg, args)
# Build the VGG19-based pose network once at import time, load the trained
# weights, wrap for (potential) multi-GPU use, and switch to eval mode.
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model = nn.DataParallel(model)
model.float()
model.eval()
def get_poses(img: Sequence[Sequence[float]]) -> Sequence[Human]:
    """Run the OpenPose network on one image and decode the detected humans."""
    with torch.no_grad():
        part_affinity_fields, heatmaps, _im_scale = get_outputs(img, model, 'rtpose')
        return paf_to_pose_cpp(heatmaps, part_affinity_fields, cfg)
collective/Products.CallProfiler | 5,042,291,632,505 | 724678c41ef86fa18f4e34ec617bc4049b7b51b7 | f18f55992621ad60db086f03e47c6f7d07db8aea | /src/Products/CallProfiler/CallProfiler.py | fbea81a5c01114eba54e7b793efc1571ec5db486 | [
"MIT"
]
| permissive | https://github.com/collective/Products.CallProfiler | ec9a93ddc12b2e687befc7605bb44c9a22af74f8 | c0ad37bd3464d39b49e70527bf0a347002e47442 | refs/heads/master | 2023-08-30T01:10:39.993086 | 2015-06-16T20:18:36 | 2015-06-16T20:18:36 | 2,787,691 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) 2002 ekit.com Inc (http://www.ekit-inc.com/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# $Id: CallProfiler.py,v 1.12 2002/02/08 04:57:22 rjones Exp $
from App.class_init import InitializeClass
from App.special_dtml import HTMLFile
from OFS.SimpleItem import Item
from Acquisition import Implicit
from Persistence import Persistent
from AccessControl import ClassSecurityInfo
from AccessControl import ModuleSecurityInfo
modulesecurity = ModuleSecurityInfo()
# get the profiler store
from profiler import profiler
def profiler_call_hook(self, *args, **kw):
    '''Monkey-patched replacement for a profiled object's call method.

    Opens a profiler frame (meta_type + id), delegates to the stashed
    original implementation, and always closes the frame -- even when the
    original call raises.
    '''
    profiler.startCall(self.meta_type, self.getId())
    try:
        return self.profiler_call_original(*args, **kw)
    finally:
        profiler.endCall()
def profiler_publish_hook(request, *args, **kw):
    '''Wrapper around ZPublisher.Publish.publish that brackets every
    request with profiler start/end marks.
    '''
    profiler.startRequest(request)
    # Resolve the publisher module at call time; the original publish is
    # stashed on it while the hook is installed.
    import ZPublisher.Publish
    try:
        return ZPublisher.Publish.profiler_publish_original(request, *args, **kw) # noqa
    finally:
        # if we die here, we want to catch it or the publisher will get
        # confused...
        try:
            profiler.endRequest()
        except:
            # Bare except is deliberate: nothing may escape into the
            # publisher.  log the error though
            from zLOG import LOG, ERROR
            import sys
            LOG('CallProfiler.publish_hook', ERROR,
                'Error during endmark()', error=sys.exc_info())
class Profileable:
    """Descriptor for one Zope meta_type whose call method can be hooked.

    The dotted module path is resolved at construction time; when the
    product is not installed the wrapped class is recorded as None and the
    entry simply reports itself as unavailable.
    """

    def __init__(self, module, klass, method):
        self.module = module
        self.method = method
        # Import the top-level package, then walk the dotted path down to
        # the actual class object we intend to patch.
        try:
            package = __import__(module)
        except ImportError:
            self.klass = None
            return
        for attribute in module.split('.')[1:]:
            package = getattr(package, attribute)
        self.klass = getattr(package, klass)
        self.name = self.klass.meta_type
        self.icon = getattr(self.klass, 'icon', None)

    def install(self):
        """Stash the original call method and splice in the profiler hook."""
        self.klass.profiler_call_original = getattr(self.klass, self.method)
        setattr(self.klass, self.method, profiler_call_hook)

    def uninstall(self):
        """Restore the stashed call method and drop the stash attribute."""
        setattr(self.klass, self.method, self.klass.profiler_call_original)
        del self.klass.profiler_call_original

    def isInstalled(self):
        """The stashed original doubles as the 'hook active' marker."""
        return hasattr(self.klass, 'profiler_call_original')

    def isAvailable(self):
        """True when the dotted module path resolved at construction."""
        return self.klass is not None

    def checkbox(self):
        """Render this entry's configure-form checkbox, pre-checked when hooked."""
        if self.isInstalled():
            checked = 'CHECKED'
        else:
            checked = ''
        return '<input name="enabled:list" type="checkbox" value="%s"%s>%s' % (
            self.name, checked, self.name)
# Registry of every meta_type the profiler knows how to hook, keyed by the
# human-readable name shown on the configure form.  Entries whose product
# is not installed resolve with klass=None and report themselves unavailable.
profileable_modules = {
    'Page Template': Profileable('Products.PageTemplates.PageTemplates',
                                 'PageTemplates', '__call__'),
    'DTML Method': Profileable('OFS.DTMLMethod', 'DTMLMethod', '__call__'),
    'MLDTMLMethod': Profileable('Products.MLDTML.MLDTML',
                                'MLDTMLMethod', '__call__'),
    'Z SQL Method': Profileable('Products.ZSQLMethods.SQL', 'SQL', '__call__'),
    'Python Method': Profileable('Products.PythonMethod.PythonMethod',
                                 'PythonMethod', '__call__'),
    'Script (Python)': Profileable('Products.PythonScripts.PythonScript',
                                   'PythonScript', '_exec'),
    'Filesystem Script (Python)':
        Profileable('Products.CMFCore.FSPythonScript', 'FSPythonScript',
                    '__call__'),
    'Filesystem DTML Method':
        Profileable('Products.CMFCore.FSDTMLMethod', 'FSDTMLMethod',
                    '__call__'),
    'Filesystem Page Template':
        Profileable('Products.CMFCore.FSPageTemplate', 'FSPageTemplate',
                    '__call__'),
}
class CallProfiler(Item, Implicit, Persistent):
    '''Control Panel object that manages call profiling.

    Installs/removes the ZPublisher hook and the per-meta_type call hooks,
    and renders the collected timing data (plain results, per-URL summary
    and aggregated call trees) through the bundled DTML templates.
    '''
    id = 'CallProfiler'
    title = meta_type = 'Call Profiler'
    security = ClassSecurityInfo()

    # define the tabs for the management interface
    manage_options = (
        {'label': 'Configure', 'action': 'configureForm'},
        {'label': 'Results', 'action': 'results'},
        {'label': 'Results by URL', 'action': 'resultsByURL'},
        {'label': 'Aggregates', 'action': 'aggregates'},
    ) + Item.manage_options

    #
    # Configuration interface
    #

    # DTML templates shipped with the product; each doubles as a view method.
    configureForm = HTMLFile('dtml/configure', globals())
    detail = HTMLFile('dtml/detail', globals())
    results = HTMLFile('dtml/results', globals())
    aggregates = HTMLFile('dtml/aggregates', globals())
    aggregateDetail = HTMLFile('dtml/aggregateDetail', globals())
    resultsByURL = HTMLFile('dtml/resultsByURL', globals())
    security.declareProtected('View management screens', 'configureForm',
                              'detail', 'results', 'resultsByURL')

    security.declareProtected('View management screens', 'getComponentModules')
    def getComponentModules(self):
        '''List the components available to profile
        '''
        # NOTE(review): keys() + sort() assumes Python 2 list semantics.
        l = []
        names = profileable_modules.keys()
        names.sort()
        for name in names:
            if profileable_modules[name].isAvailable():
                l.append((name, profileable_modules[name]))
        return l

    security.declareProtected('View management screens', 'monitorAll')
    def monitorAll(self):
        '''Set to monitor all that we can
        '''
        enabled = [x[0] for x in self.getComponentModules()]
        return self.configure(enabled=enabled)

    security.declareProtected('View management screens', 'monitorNone')
    def monitorNone(self):
        '''Set to monitor no calls
        '''
        return self.configure()

    security.declareProtected('View management screens', 'configure')
    def configure(self, enabled=[]):
        '''Set the given items to enabled
        '''
        # install or uninstall the publisher hook as required: it is needed
        # exactly when at least one component is being profiled
        if not enabled and self.isPublisherHookInstalled():
            self.uninstallPublisherHook()
        elif enabled and not self.isPublisherHookInstalled():
            self.installPublisherHook()
        # now install the selected modules
        for component, module in self.getComponentModules():
            if component in enabled and not module.isInstalled():
                module.install()
            elif component not in enabled and module.isInstalled():
                module.uninstall()
        if not enabled:
            message = 'all profiling disabled'
        else:
            message = ', '.join(enabled) + ' enabled'
        return self.configureForm(self, self.REQUEST,
                                  manage_tabs_message=message)

    security.declarePrivate('installPublisherHook')
    def installPublisherHook(self):
        '''Set the ZPublisher hook
        '''
        import ZPublisher.Publish
        ZPublisher.Publish.profiler_publish_original = ZPublisher.Publish.publish # noqa
        ZPublisher.Publish.publish = profiler_publish_hook

    security.declarePrivate('uninstallPublisherHook')
    def uninstallPublisherHook(self):
        '''Remove the ZPublisher hook
        '''
        import ZPublisher.Publish
        ZPublisher.Publish.publish = ZPublisher.Publish.profiler_publish_original # noqa
        del ZPublisher.Publish.profiler_publish_original

    security.declareProtected('View management screens',
                              'isPublisherHookInstalled')
    def isPublisherHookInstalled(self):
        '''Detect the presence of the publisher hook
        '''
        import ZPublisher.Publish
        return hasattr(ZPublisher.Publish, 'profiler_publish_original')

    #
    # Results handling code
    #
    security.declareProtected('View management screens', 'clear')
    def clear(self):
        '''Clear the current results
        '''
        profiler.reset()
        return self.configureForm(self, self.REQUEST,
                                  manage_tabs_message='cleared')

    # NOTE(review): the declaration below protects 'resultsOverTime' but the
    # method is named 'summary' -- confirm which name is intended.
    security.declareProtected('View management screens', 'resultsOverTime')
    def summary(self):
        '''Calculate summary info
        '''
        #sort = self.REQUEST['sort']
        # if sort:
        # return profiler.listTransactions(sort=sort)
        #rsort = self.REQUEST['rsort']
        # if rsort:
        # return profiler.listTransactions(rsort=rsort)
        return profiler.listTransactions(sort='time_start')

    security.declareProtected('View management screens', 'summaryByURL')
    def summaryByURL(self):
        '''Calculate some summary info
        '''
        l = profiler.listTransactions(sort='time_start')
        # print up the summary: per-URL min/max/total/average request time
        # plus the individual transactions
        summary = {}
        for transaction in l:
            tt = transaction.time_total
            url = transaction.url
            if url in summary:
                d = summary[url]
                d['min'] = min(d['min'], tt)
                d['max'] = max(d['max'], tt)
                d['tot'] += tt
                d['num'] += 1
                d['ave'] = d['tot'] / d['num']
                d['transactions'].append((tt, transaction))
            else:
                summary[url] = {'min': tt, 'max': tt, 'tot': tt, 'num': 1,
                                'ave': tt, 'transactions': [(tt, transaction)],
                                'truncated_url': transaction.truncated_url}
        # NOTE(review): items() + sort() assumes Python 2 list semantics.
        summary = summary.items()
        summary.sort()
        return summary

    security.declareProtected('View management screens', 'validTID')
    def validTID(self, tid):
        '''Determine if the tid is valid
        '''
        return profiler.hasTID(tid)

    security.declareProtected('View management screens', 'detailResults')
    def detailResults(self, tid):
        '''Show a detailed result
        '''
        transaction = profiler.transaction[tid]
        # do the HTML extra bits: tree-drawing prefix, display time/percent
        # and a highlight colour per event
        pm = profileable_modules
        for depth, info in transaction.listEvents():
            if 'events' in info:
                info['treepart'] = '| ' * depth + '+-'
                if info['events'] and 'time_processing' in info:
                    percent = info['percentage_processing']
                    time_display = info['time_processing']
                else:
                    percent = info['percentage']
                    time_display = info['time_total']
            else:
                info['treepart'] = '| ' * depth
                percent = info['percentage']
                time_display = info['time_total']
            info['time_display'] = time_display
            info['percentage_display'] = percent
            info['percentage_int'] = int(percent / 2)
            info['icon'] = ''
            if 'meta_type' in info:
                module = pm[info['meta_type']]
                if module.icon:
                    info['icon'] = module.icon
            # colour thresholds match colour_key() below
            if percent > 10:
                info['colour'] = '#ffbbbb'
            elif percent > 5:
                info['colour'] = '#ffdbb9'
            elif percent > 3:
                info['colour'] = '#fff9b9'
            else:
                info['colour'] = ''
        return transaction

    security.declareProtected('View management screens', 'colour_key')
    def colour_key(self):
        '''Draw a table with the highlight colours
        '''
        return '''The colours used in the table highlight the calls that take
        a high percentage of the total time for the request:
        <table border=0 cellpadding=2 cellspacing=2>
        <tr><td bgcolor="white">0-3%</td>
        <td bgcolor="#fff9b9">3-5%</td>
        <td bgcolor="#ffdbb9">5-10%</td>
        <td bgcolor="#ffbbbb">10+%</td>
        </tr></table>'''

    security.declareProtected('View management screens', 'aggregateResults')
    def aggregateResults(self):
        '''Generate aggregated results for the calls - where the call patterns
        exactly match by URL
        '''
        return profiler.aggregateResults()

    security.declareProtected('View management screens',
                              'aggregateDetailResults')
    def aggregateDetailResults(self, tid, show_all=0):
        '''Generate table row cells for the given transaction
        '''
        agg = profiler.aggregateDetailResults(tid)
        # do the HTML extra bits: same as detailResults, but with
        # min/average/max variants of each time and percentage
        pm = profileable_modules
        for depth, info in agg.listEvents():
            if 'events' in info:
                info['treepart'] = '| ' * depth + '+-'
                if info['events'] and 'ave_time_processing' in info:
                    min_percent = info['min_percentage_processing']
                    percent = info['ave_percentage_processing']
                    max_percent = info['max_percentage_processing']
                    min_time_display = info['min_time_processing']
                    time_display = info['ave_time_processing']
                    max_time_display = info['max_time_processing']
                else:
                    min_percent = info['min_percentage']
                    percent = info['ave_percentage']
                    max_percent = info['max_percentage']
                    min_time_display = info['min_time_total']
                    time_display = info['ave_time_total']
                    max_time_display = info['max_time_total']
            else:
                info['treepart'] = '| ' * depth
                min_percent = info['min_percentage']
                percent = info['ave_percentage']
                max_percent = info['max_percentage']
                min_time_display = info['min_time_total']
                time_display = info['ave_time_total']
                max_time_display = info['max_time_total']
            info['min_time_display'] = min_time_display
            info['time_display'] = time_display
            info['max_time_display'] = max_time_display
            info['min_percentage_display'] = min_percent
            info['percentage_display'] = percent
            info['max_percentage_display'] = max_percent
            info['icon'] = ''
            if 'meta_type' in info:
                module = pm[info['meta_type']]
                if module.icon:
                    info['icon'] = module.icon
            info['percentage_int'] = int(percent / 2)
            # colouring is driven by the *average* percentage
            if percent > 10:
                info['colour'] = '#ffbbbb'
            elif percent > 5:
                info['colour'] = '#ffdbb9'
            elif percent > 3:
                info['colour'] = '#fff9b9'
            else:
                info['colour'] = ''
        return agg
# Zope class-security initialisation and module security registration.
InitializeClass(CallProfiler)
modulesecurity.apply(globals())
#
# $Log: CallProfiler.py,v $
# Revision 1.12 2002/02/08 04:57:22 rjones
# typo
#
# Revision 1.11 2002/02/08 03:18:55 rjones
# got meta_type images in there... much nicer
#
# Revision 1.10 2002/02/07 23:12:48 rjones
# Fixes to the data gathering and display
#
# Revision 1.9 2002/02/07 05:09:11 rjones
# Better call stack handling
#
# Revision 1.8 2002/02/06 00:33:55 rjones
# Lots of data handling improvements:
# . moved the data handling part off into a separate module
# . that module has some basic unit tests
#
# Revision 1.7 2002/02/05 22:11:02 rjones
# Fixes
#
# Revision 1.6 2002/02/05 04:50:13 rjones
# Fixes, aggregation, oh my! :)
#
# Revision 1.5 2002/02/01 05:42:17 rjones
# fixes
#
# Revision 1.4 2002/01/31 23:11:36 rjones
# copyright and CVS comment cleanups
#
# Revision 1.3 2002/01/31 06:19:17 rjones
# Now adds itself to the Control Panel, and isn't available for adding elsewhere.
#
# Revision 1.2 2002/01/31 05:03:27 rjones
# adding CallProfiler to HEAD
#
# Revision 1.1.2.6 2002/01/31 04:16:33 rjones
# Some more cleanups
#
# Revision 1.1.2.5 2002/01/31 04:09:42 rjones
# More cleanups in the profiler code so we can get at the data more easily
#
# Revision 1.1.2.4 2002/01/31 00:50:08 rjones
# Profiler now patches the Zope modules as required:
# . ZPublisher/Publish.py publish function
# . others as defined in the profileable_modules dict in CallProfiler.py
#
# Revision 1.1.2.3 2002/01/30 07:36:00 rjones
# split off the processing code from the display code - should be easy enough
# to do the render in ZPT or *shudder* DTML.
#
# Revision 1.1.2.2 2002/01/30 05:41:33 rjones
# cosmetic changes
#
# Revision 1.1.2.1 2002/01/30 04:48:38 rjones
# CallProfiler initial version
#
| UTF-8 | Python | false | false | 17,750 | py | 9 | CallProfiler.py | 5 | 0.596282 | 0.576901 | 0 | 487 | 35.447639 | 89 |
chang-jing0/ycsh_python_course | 2,499,670,971,338 | fa7c65a9e2892573e1bfde49a1dd1d5bc8bd5358 | b32eeffea571c3529178229df50cbf232962b6b3 | /期末考/sort_n.py | 2f09834d130458c63e2beac571372b6c56131ced | []
| no_license | https://github.com/chang-jing0/ycsh_python_course | 6362de2789cf69f7777a58565f81801eaa5b6c0e | 1397a404aea48b19d648c2582e1200ec4e767487 | refs/heads/master | 2023-02-25T16:04:26.060826 | 2021-02-03T12:37:31 | 2021-02-03T12:37:31 | 299,842,056 | 0 | 0 | null | true | 2020-09-30T07:29:25 | 2020-09-30T07:29:24 | 2020-09-30T01:45:52 | 2020-09-30T01:45:50 | 1 | 0 | 0 | 0 | null | false | false | x = []
n = int(input('要排序幾個數 : '))
for i in range(n):
y = int(input('請輸入第' + str(i+1) + '個數'))
x.append(y)
x.sort()
x.reverse()
for j in range(n):
print(x[j]) | UTF-8 | Python | false | false | 192 | py | 40 | sort_n.py | 40 | 0.52381 | 0.517857 | 0 | 13 | 12 | 42 |
madhumallidi/my-python-app | 1,924,145,362,243 | a3b526302c016d6ce657638a3f693edf3df9cd97 | 75dc59c2c3f57dafedeceef8d3481251d387a104 | /Practice3.py | 4f67450a2380f453bee8dd4ae5e5348a2e4080fc | []
| no_license | https://github.com/madhumallidi/my-python-app | 9fcd283bd369930e647d6575246a40ee58248691 | eaf49a0a21013c7202b8588a721f98e99ed336d4 | refs/heads/master | 2020-03-30T16:07:19.486116 | 2018-10-14T06:48:58 | 2018-10-14T06:48:58 | 151,394,012 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # practice session to write the code of If, For, List
# Practice script: list / tuple / set operations with printed checkpoints.
fruits = ['Apple', 'Banana', 'Cherry']

# Membership test: printed first, then branched on.
print('Banana' in fruits)
if 'Banana' in fruits:
    print('Yes it is')

# One fruit per line — same output as looping and printing each element.
print(*fruits, sep='\n')

fruits.append('Orange')
print(fruits)
fruits.insert(2, 'Pineapple')
print(fruits)
fruits.remove('Pineapple')
print(fruits)
fruits.pop()        # drops the last element ('Orange')
print(fruits)
del fruits[0]       # drops 'Apple'
print(fruits)

myTuple = ('SP', 'DP', 'DV', 'FM', 'PM')
print(myTuple)

thisSet = {'banana', 'orange', 'pink'}
print(thisSet)
thisSet.add('yellow')
print(thisSet)
thisSet.update(['white', 'black'])
print(thisSet)
thisSet.remove('banana')    # remove() raises KeyError if the member is absent
print(thisSet)
thisSet.discard('black')    # discard() never raises
print(thisSet)
thisSet.discard('black')    # second discard of the same value is a no-op
print(thisSet)
# thisSet.remove('banana')
# print(thisSet)
item = thisSet.pop()        # removes and returns an arbitrary member
print(item)
| UTF-8 | Python | false | false | 784 | py | 22 | Practice3.py | 21 | 0.684949 | 0.682398 | 0 | 53 | 13.792453 | 53 |
yjshm/test | 8,383,776,208,645 | bf6ee8ddf64bf848c01c970047f9cf15165cd0cc | 7fcac755cc4a906330961746b69d2f28763f1f0c | /youpornScrapy/youporn/build/lib/youporn/pipelines.py | ba3ee473865e247e176600c353f5f26c09ec82d9 | []
| no_license | https://github.com/yjshm/test | c054ec1e31918448b99cc76665bef654f5046c12 | 8e321ef8fd2cdc763cc98cf0c2fc727d584a9ac7 | refs/heads/master | 2021-01-25T06:49:18.012340 | 2019-09-04T10:17:33 | 2019-09-04T10:17:33 | 93,615,340 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import lxml
import xmltodict
import copy
from xml.dom.minidom import Document
from operator import itemgetter # itemgetter用来去dict中的key,省去了使用lambda函数
from itertools import groupby # itertool还包含有其他很多函数,比如将多个list联合起来。。
import json
RELAY_SEVER_HEAD = 'http://192.168.127.254:8080/infytb'
class YoupornPipeline(object):
    """Scrapy item pipeline that collects scraped items and, when the
    spider closes, writes one RSS file per category plus a category index.
    """

    # Collected items; re-initialised per pipeline instance in __init__.
    items = None

    def __init__(self):
        ##self.fo = open('youpornsave.txt', 'wb')
        self.items = []

    def process_item(self, item, spider):
        """Copy the fields of interest and stash them for close_spider."""
        # sorted(item.items(), key=lambda i:i["class1"])
        '''
        self.fo.write((item['class1'] + '\n').encode('utf-8'))
        self.fo.write((item['class2'] + '\n').encode('utf-8'))
        self.fo.write((item['class3'] + '\n').encode('utf-8'))
        self.fo.write((item['title'] + '\n').encode('utf-8'))
        self.fo.write((item['link'] + '\n').encode('utf-8'))
        self.fo.write((item['description'] + '\n').encode('utf-8'))
        self.fo.write((item['guid'] + '\n').encode('utf-8'))
        self.fo.write((item['pubDate'] + '\n').encode('utf-8'))
        self.fo.flush()
        '''
        # The item must be returned here, otherwise Scrapy keeps waiting
        # for it.
        item2 = {}
        item2['class1'] = item['class1']
        item2['class2'] = item['class2']
        item2['class3'] = item['class3']
        item2['title'] = item['title']
        item2['link'] = item['link']
        item2['description'] = item['description']
        item2['guid'] = item['guid']
        item2['pubDate'] = item['pubDate']
        self.items.append(item2)
        return item

    def close_spider(self, spider):
        """On spider close, write all per-category and index RSS files."""
        # Save one on-demand RSS file per individual category.
        # Must sort before groupby; sort() changes the list in place.
        self.items.sort(key=itemgetter('class1','class2','class3'))
        items_group = groupby(self.items, itemgetter('class1', 'class2','class3'))
        for key, group in items_group:
            rss = {
                'rss': {'channel':
                    {
                        'item': []
                    }
                }
            }
            # group is an iterator over the rows of this category group.
            for item in group:
                # print key,item
                item2 = {}
                item2['title'] = item['title']
                item2['link'] = item['link']
                item2['description'] = item['description']
                item2['guid'] = item['guid']
                item2['pubDate'] = item['pubDate']
                rss['rss']['channel']['item'].append(item2)
            # print(result)
            self.save_as_xml_rss(rss, './rss/youporn_{}_rss.xml'.format(item['class3']))
        # Save the category-index RSS.
        # Must sort before groupby; sort() changes the list in place.
        self.items.sort(key=itemgetter('class1','class2','class3'))
        struct_data = self.convert_struct(self.items)
        self.save_class_as_xml(struct_data, './rss/youporn_categorie_rss.xml')
        #self.fo.close()

    # Save a single category's RSS file.
    def save_as_xml_rss(self, rss, file_name):
        """Serialise ``rss`` (a dict) to XML and write it to ``file_name``.

        Returns True on success; if unparse raises, the (empty) string is
        still written by the finally-block and the exception propagates.
        """
        xml = ''
        try:
            xml = xmltodict.unparse(rss, encoding='utf-8')
        finally:
            with codecs.open(file_name, 'w', 'utf-8') as f:
                f.write(xml)
        return True

    # Convert the collected items into structured category data.
    # Returns e.g.: {'Action': {'ACTION': {'Teen': 'http://.../xxrss.xml'}}}
    def convert_struct(self, rss):
        class1s = {}
        for item in rss:
            xml_class1 = item['class1']
            xml_class2 = item['class2']
            xml_class3 = item['class3']
            # Build the nested class1 -> class2 -> class3 dict lazily.
            if xml_class1 not in class1s:
                class1s[xml_class1] = {}
            if xml_class2 not in class1s[xml_class1]:
                class1s[xml_class1][xml_class2] = {}
            if xml_class3 not in class1s[xml_class1][xml_class2]:
                class1s[xml_class1][xml_class2][xml_class3] = {}
            # Leaf value is the relay URL of that category's RSS file.
            class1s[xml_class1][xml_class2][xml_class3] = '{}/youporn_{}_rss.xml'.format(RELAY_SEVER_HEAD, item['class3'])
        return class1s

    # Save the full category tree file.
    def save_class_as_xml(self, rss, file_name):
        """Write ``rss`` (nested class1/class2/class3 dict) as a DOM tree:
        <rss><categorie><class1><title/>...<class2>...<item><title/><link/>.
        """
        doc = Document()
        rss_xml = doc.createElement('rss')
        categorie = doc.createElement('categorie')
        for class1_name,class1_item in rss.items() :
            class1 = doc.createElement('class1')
            class1_title = doc.createElement('title')
            objectcontenttext = doc.createTextNode(class1_name)
            class1_title.appendChild(objectcontenttext)
            class1.appendChild(class1_title)
            for class2_name, class2_item in class1_item.items():
                class2 = doc.createElement('class2')
                class2_title = doc.createElement('title')
                objectcontenttext = doc.createTextNode(class2_name)
                class2_title.appendChild(objectcontenttext)
                class2.appendChild(class2_title)
                for class3_name, class3_item in class2_item.items():
                    item = doc.createElement('item')
                    item_title = doc.createElement('title')
                    objectcontenttext = doc.createTextNode(class3_name)
                    item_title.appendChild(objectcontenttext)
                    item.appendChild(item_title)
                    item_link = doc.createElement('link')
                    objectcontenttext = doc.createTextNode(class3_item)
                    item_link.appendChild(objectcontenttext)
                    item.appendChild(item_link)
                    class2.appendChild(item)
                class1.appendChild(class2)
            categorie.appendChild(class1)
        rss_xml.appendChild(categorie)
        doc.appendChild(rss_xml)
        with open(file_name, 'w') as fp:
            doc.writexml(fp, indent='\t', newl='\n', addindent='\t', encoding='utf-8')
| UTF-8 | Python | false | false | 6,174 | py | 30 | pipelines.py | 13 | 0.554949 | 0.533788 | 0 | 147 | 38.863946 | 122 |
jehovahxu/ca-gan | 19,086,834,702,865 | 247f27f33fcc3cde91e92874782e43c01eb70a96 | ef1446daf000ab4e6450b13121e9fcb45a17bc38 | /test.py | 19832411aac47dc65973c53b9ef242e09c696068 | []
| no_license | https://github.com/jehovahxu/ca-gan | d6224b4113a9a9fc8cd465657194e24df14eed78 | 3e3b92f5e3313a0c526829a05951f3ffc95923b3 | refs/heads/master | 2020-12-03T12:40:48.311404 | 2020-06-09T08:37:05 | 2020-06-09T08:37:05 | 231,320,240 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import torch
from torch.autograd import Variable
import numpy as np
import os
import torchvision.utils as vutils
from data import *
from model import *
import option
from torch.utils.data import DataLoader
from myutils.Unet2 import *

# Inference script: rebuild the generator/encoder, load the checkpoint
# selected by opt.test_epoch, run the test set through them and save the
# cropped outputs as JPEGs.
opt = option.init()
norm_layer = get_norm_layer(norm_type='batch')
netG = MyUnetGenerator(opt.input_nc, opt.output_nc, 8, opt.ngf,
                       norm_layer=norm_layer, use_dropout=False,
                       gpu_ids=opt.gpu_ids)
netE = MyEncoder(opt.input_nc, opt.output_nc, 8, opt.ngf,
                 norm_layer=norm_layer, use_dropout=False,
                 gpu_ids=opt.gpu_ids)

fold = opt.test_epoch
netG.load_state_dict(torch.load('./checkpoint/netG_epoch_' + fold + '.weight'))
netE.load_state_dict(torch.load('./checkpoint/netE_epoch_' + fold + '.weight'))
netE.cuda()
netG.cuda()
# NOTE(review): the networks stay in train mode; if they contain dropout or
# batch-norm layers, netG.eval()/netE.eval() would usually be expected here
# for inference — confirm against training code.

test_set = DatasetFromFolder(opt, False)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads,
                                 batch_size=1, shuffle=False)

# Results go to <opt.output>/<test_epoch>/.
if not os.path.exists(opt.output):
    os.makedirs(opt.output)
save_dir_A = opt.output + "/" + fold
if not os.path.exists(save_dir_A):
    os.makedirs(save_dir_A)

for i, batch in enumerate(testing_data_loader):
    real_p, real_s, identity = Variable(batch[0]), Variable(batch[1]), \
        Variable(batch[2].squeeze(1))
    real_p, real_s, identity = real_p.cuda(), real_s.cuda(), identity.cuda()
    # Channels 3+ of real_p carry the parsing map (see removed commented-out
    # code); channels 0-2 carry the photo fed to the generator.
    parsing_feature = netE(real_p[:, 3:, :, :])
    # Call the module instead of invoking .forward() directly so PyTorch
    # hooks run; the computation is identical.
    fake_s1 = netG(real_p[:, 0:3, :, :], parsing_feature)
    output_name_A = '{:s}/{:s}{:s}'.format(
        save_dir_A, str(i + 1), '.jpg')
    # Crop the padding border before saving.
    vutils.save_image(fake_s1[:, :, 3:253, 28:228], output_name_A,
                      normalize=True, scale_each=True)
    print(" saved")  # was Python-2-only `print " saved"`; output unchanged
| UTF-8 | Python | false | false | 2,288 | py | 4 | test.py | 3 | 0.631556 | 0.602273 | 0 | 58 | 38.396552 | 130 |
openstack/glance | 5,909,875,004,240 | 0c6d3939b9c1f84f93b4300bae10d05089c2e124 | 57767ccd77d484ea60001f28d90270d1e34ac974 | /glance/tests/unit/test_notifier.py | 14f525f87f2c594c5e228dffdf4d5addcfa80901 | [
"Apache-2.0"
]
| permissive | https://github.com/openstack/glance | ff459174fb502ac9b5030ab2d1aafc2fa5e40475 | 11af8f0ed5fcd53ab3865a40ae50e467a0c06e6c | refs/heads/master | 2023-09-04T06:55:03.257371 | 2023-09-02T03:13:30 | 2023-09-02T03:13:30 | 2,155,157 | 389 | 498 | Apache-2.0 | false | 2020-12-15T05:36:34 | 2011-08-04T15:05:19 | 2020-12-15T00:39:28 | 2020-12-15T05:36:31 | 30,262 | 481 | 508 | 0 | Python | false | false | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
import glance_store
from oslo_config import cfg
import oslo_messaging
import webob
import glance.async_
from glance.common import exception
from glance.common import timeutils
import glance.context
from glance import notifier
import glance.tests.unit.utils as unit_test_utils
from glance.tests import utils
# Fixed timestamp and IDs shared by the stub fixtures below.
DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355)
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
class ImageStub(glance.domain.Image):
    """Image double whose payload is two fixed five-byte chunks."""

    def get_data(self, offset=0, chunk_size=None):
        # Always the same ten bytes in two chunks; offset/chunk_size are
        # accepted for interface compatibility but ignored.
        return ['01234', '56789']

    def set_data(self, data, size, backend=None, set_active=True):
        # Consume the iterable so any generator side effects run; the
        # chunks themselves are discarded.
        for _ in data:
            pass
class ImageRepoStub(object):
    """Image-repo double: each method returns a marker naming the call.

    Tests use the markers to assert the notifier proxy delegated to the
    wrapped repo method.
    """

    def remove(self, *args, **kwargs):
        # Was 'image_from_get' — a copy/paste slip; every other repo stub
        # in this module follows the '<thing>_from_<method>' pattern.
        return 'image_from_remove'

    def save(self, *args, **kwargs):
        return 'image_from_save'

    def add(self, *args, **kwargs):
        return 'image_from_add'

    def get(self, *args, **kwargs):
        return 'image_from_get'

    def list(self, *args, **kwargs):
        return ['images_from_list']
class ImageMemberRepoStub(object):
    """Membership-repo double returning a per-call marker string."""

    def remove(self, *args, **kwargs):
        """Marker proving remove() was delegated."""
        return 'image_member_from_remove'

    def save(self, *args, **kwargs):
        """Marker proving save() was delegated."""
        return 'image_member_from_save'

    def add(self, *args, **kwargs):
        """Marker proving add() was delegated."""
        return 'image_member_from_add'

    def get(self, *args, **kwargs):
        """Marker proving get() was delegated."""
        return 'image_member_from_get'

    def list(self, *args, **kwargs):
        """Marker list proving list() was delegated."""
        return ['image_members_from_list']
class TaskStub(glance.domain.TaskStub):
    """Task stub whose run() is inert so tests never execute real work."""

    def run(self, executor):
        # Intentionally empty: notification tests only need the interface.
        pass
class Task(glance.domain.Task):
    """Task double with inert success/failure transitions."""

    def succeed(self, result):
        # No state change; tests only inspect emitted notifications.
        pass

    def fail(self, message):
        # Likewise inert.
        pass
class TaskRepoStub(object):
    """Task-repo double; every method returns a marker naming the call."""

    def remove(self, *args, **kwargs):
        """Marker proving remove() was delegated."""
        return 'task_from_remove'

    def save(self, *args, **kwargs):
        """Marker proving save() was delegated."""
        return 'task_from_save'

    def add(self, *args, **kwargs):
        """Marker proving add() was delegated."""
        return 'task_from_add'

    def get_task(self, *args, **kwargs):
        """Marker proving get_task() was delegated."""
        return 'task_from_get'

    def list(self, *args, **kwargs):
        """Marker list proving list() was delegated."""
        return ['tasks_from_list']
class TestNotifier(utils.BaseTestCase):
    """Notifier construction and oslo.messaging wiring."""

    @mock.patch.object(oslo_messaging, 'Notifier')
    @mock.patch.object(oslo_messaging, 'get_notification_transport')
    def _test_load_strategy(self,
                            mock_get_transport, mock_notifier,
                            url, driver):
        # Decorators apply bottom-up, so the innermost patch
        # (get_notification_transport) is the first mock argument.
        # url/driver are kept for the historical strategy interface but
        # are not used in this body.
        nfier = notifier.Notifier()
        mock_get_transport.assert_called_with(cfg.CONF)
        self.assertIsNotNone(nfier._transport)
        mock_notifier.assert_called_with(nfier._transport,
                                         publisher_id='image.localhost')
        self.assertIsNotNone(nfier._notifier)

    def test_notifier_load(self):
        self._test_load_strategy(url=None, driver=None)

    @mock.patch.object(oslo_messaging, 'set_transport_defaults')
    def test_set_defaults(self, mock_set_trans_defaults):
        # Explicit control exchange is passed through; default is 'glance'.
        notifier.set_defaults(control_exchange='foo')
        mock_set_trans_defaults.assert_called_with('foo')
        notifier.set_defaults()
        mock_set_trans_defaults.assert_called_with('glance')
class TestImageNotifications(utils.BaseTestCase):
    """Test Image Notifications work"""

    def setUp(self):
        super(TestImageNotifications, self).setUp()
        # Fixed image/context fixtures; FakeNotifier records every emitted
        # notification for later inspection via get_logs().
        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami', virtual_size=2048,
            tags=['one', 'two'], disk_format='ami', min_ram=128,
            min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.image_repo_stub = ImageRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.image_repo_proxy = glance.notifier.ImageRepoProxy(
            self.image_repo_stub, self.context, self.notifier)
        self.image_proxy = glance.notifier.ImageProxy(
            self.image, self.context, self.notifier)

    def test_image_save_notification(self):
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.update', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        # Locations must never leak into notification payloads.
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_save_notification_disabled(self):
        # With image.update disabled, no notification is emitted.
        self.config(disabled_notifications=["image.update"])
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_add_notification(self):
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.create', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_add_notification_disabled(self):
        self.config(disabled_notifications=["image.create"])
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_delete_notification(self):
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.delete', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertTrue(output_log['payload']['deleted'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.delete'])
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_get(self):
        # Repo proxy wraps returned images in an ImageProxy.
        image = self.image_repo_proxy.get(UUID1)
        self.assertIsInstance(image, glance.notifier.ImageProxy)
        self.assertEqual('image_from_get', image.repo)

    def test_image_list(self):
        images = self.image_repo_proxy.list()
        self.assertIsInstance(images[0], glance.notifier.ImageProxy)
        self.assertEqual('images_from_list', images[0].repo)

    def test_image_get_data_should_call_next_image_get_data(self):
        with mock.patch.object(self.image, 'get_data') as get_data_mock:
            self.image_proxy.get_data()
            self.assertTrue(get_data_mock.called)

    def test_image_get_data_notification(self):
        # Declared size matches the ten bytes actually sent, so an INFO
        # image.send notification is emitted.
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(TENANT2, output_log['payload']['receiver_tenant_id'])
        self.assertEqual(USER1, output_log['payload']['receiver_user_id'])
        self.assertEqual(10, output_log['payload']['bytes_sent'])
        self.assertEqual(TENANT1, output_log['payload']['owner_id'])

    def test_image_get_data_notification_disabled(self):
        self.config(disabled_notifications=['image.send'])
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_get_data_size_mismatch(self):
        # Declared size (11) differs from bytes sent (10) -> ERROR log.
        self.image_proxy.size = 11
        list(self.image_proxy.get_data())
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])

    def test_image_set_data_prepare_notification(self):
        # 'insurance' proves the generator body actually ran to completion.
        insurance = {'called': False}

        def data_iterator():
            # image.prepare must be emitted before the first chunk is
            # consumed, so the assertions run inside the generator.
            output_logs = self.notifier.get_logs()
            self.assertEqual(1, len(output_logs))
            output_log = output_logs[0]
            self.assertEqual('INFO', output_log['notification_type'])
            self.assertEqual('image.prepare', output_log['event_type'])
            self.assertEqual(self.image.image_id, output_log['payload']['id'])
            self.assertEqual(['store1', 'store2'], output_log['payload'][
                'os_glance_importing_to_stores'])
            self.assertEqual([],
                             output_log['payload']['os_glance_failed_import'])
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True

        self.image_proxy.extra_properties[
            'os_glance_importing_to_stores'] = 'store1,store2'
        self.image_proxy.extra_properties['os_glance_failed_import'] = ''
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])

    def test_image_set_data_prepare_notification_disabled(self):
        insurance = {'called': False}

        def data_iterator():
            output_logs = self.notifier.get_logs()
            self.assertEqual(0, len(output_logs))
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True

        self.config(disabled_notifications=['image.prepare'])
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])

    def test_image_set_data_upload_and_activate_notification(self):
        # Queued image: a successful upload emits image.upload followed by
        # image.activate once no stores remain pending.
        image = ImageStub(image_id=UUID1, name='image-1', status='queued',
                          created_at=DATETIME, updated_at=DATETIME,
                          owner=TENANT1, visibility='public')
        context = glance.context.RequestContext(tenant=TENANT2, user=USER1)
        fake_notifier = unit_test_utils.FakeNotifier()
        image_proxy = glance.notifier.ImageProxy(image, context, fake_notifier)

        def data_iterator():
            fake_notifier.log = []
            yield 'abcde'
            yield 'fghij'
            # Simulate import progress: only 'store2' still pending by the
            # time the upload notification fires.
            image_proxy.extra_properties[
                'os_glance_importing_to_stores'] = 'store2'

        image_proxy.extra_properties[
            'os_glance_importing_to_stores'] = 'store1,store2'
        image_proxy.extra_properties['os_glance_failed_import'] = ''
        image_proxy.set_data(data_iterator(), 10)

        output_logs = fake_notifier.get_logs()
        self.assertEqual(2, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertEqual(['store2'], output_log['payload'][
            'os_glance_importing_to_stores'])
        self.assertEqual([],
                         output_log['payload']['os_glance_failed_import'])

        output_log = output_logs[1]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.activate', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])

    def test_image_set_data_upload_and_not_activate_notification(self):
        insurance = {'called': False}

        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            yield 'fghij'
            self.image_proxy.extra_properties[
                'os_glance_importing_to_stores'] = 'store2'
            insurance['called'] = True

        self.image_proxy.set_data(data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        # Only image.upload: a store is still pending, so no activate.
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertTrue(insurance['called'])

    def test_image_set_data_upload_and_activate_notification_disabled(self):
        insurance = {'called': False}
        image = ImageStub(image_id=UUID1, name='image-1', status='queued',
                          created_at=DATETIME, updated_at=DATETIME,
                          owner=TENANT1, visibility='public')
        context = glance.context.RequestContext(tenant=TENANT2, user=USER1)
        fake_notifier = unit_test_utils.FakeNotifier()
        image_proxy = glance.notifier.ImageProxy(image, context, fake_notifier)

        def data_iterator():
            fake_notifier.log = []
            yield 'abcde'
            yield 'fghij'
            insurance['called'] = True

        self.config(disabled_notifications=['image.activate', 'image.upload'])
        image_proxy.set_data(data_iterator(), 10)
        self.assertTrue(insurance['called'])
        output_logs = fake_notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    # The remaining tests feed a generator that raises partway through the
    # upload and check the resulting HTTP error plus the ERROR-level
    # image.upload notification carrying the failure message.

    def test_image_set_data_storage_full(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageFull(message='Modern Major General')

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Modern Major General', output_log['payload'])

    def test_image_set_data_value_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise ValueError('value wrong')

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('value wrong', output_log['payload'])

    def test_image_set_data_duplicate(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Duplicate('Cant have duplicates')

        self.assertRaises(webob.exc.HTTPConflict,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Cant have duplicates', output_log['payload'])

    def test_image_set_data_storage_write_denied(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageWriteDenied(message='The Very Model')

        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('The Very Model', output_log['payload'])

    def test_image_set_data_forbidden(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Forbidden('Not allowed')

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not allowed', output_log['payload'])

    def test_image_set_data_not_found(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.NotFound('Not found')

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not found', output_log['payload'])

    def test_image_set_data_HTTP_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise webob.exc.HTTPError('Http issue')

        self.assertRaises(webob.exc.HTTPError,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Http issue', output_log['payload'])

    def test_image_set_data_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.GlanceException('Failed')

        self.assertRaises(exception.GlanceException,
                          self.image_proxy.set_data, data_iterator(), 10)

        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Failed', output_log['payload'])
class TestImageMemberNotifications(utils.BaseTestCase):
    """Test Image Member Notifications work"""

    def setUp(self):
        super(TestImageMemberNotifications, self).setUp()
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.notifier = unit_test_utils.FakeNotifier()

        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami',
            tags=['one', 'two'], disk_format='ami', min_ram=128,
            min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.image_member = glance.domain.ImageMembership(
            id=1, image_id=UUID1, member_id=TENANT1, created_at=DATETIME,
            updated_at=DATETIME, status='accepted')

        self.image_member_repo_stub = ImageMemberRepoStub()
        self.image_member_repo_proxy = glance.notifier.ImageMemberRepoProxy(
            self.image_member_repo_stub, self.image,
            self.context, self.notifier)
        self.image_member_proxy = glance.notifier.ImageMemberProxy(
            self.image_member, self.context, self.notifier)

    def _assert_image_member_with_notifier(self, output_log, deleted=False):
        # Shared payload checks used by the add/update/delete tests below.
        self.assertEqual(self.image_member.member_id,
                         output_log['payload']['member_id'])
        self.assertEqual(self.image_member.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(self.image_member.status,
                         output_log['payload']['status'])
        self.assertEqual(timeutils.isotime(self.image_member.created_at),
                         output_log['payload']['created_at'])
        self.assertEqual(timeutils.isotime(self.image_member.updated_at),
                         output_log['payload']['updated_at'])

        if deleted:
            self.assertTrue(output_log['payload']['deleted'])
            self.assertIsNotNone(output_log['payload']['deleted_at'])
        else:
            self.assertFalse(output_log['payload']['deleted'])
            self.assertIsNone(output_log['payload']['deleted_at'])

    def test_image_member_add_notification(self):
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.create', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)

    def test_image_member_add_notification_disabled(self):
        self.config(disabled_notifications=['image.member.create'])
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_save_notification(self):
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.update', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)

    def test_image_member_save_notification_disabled(self):
        self.config(disabled_notifications=['image.member.update'])
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_delete_notification(self):
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.delete', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log, deleted=True)

    def test_image_member_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.member.delete'])
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_get(self):
        # Repo proxy wraps returned members in an ImageMemberProxy.
        image_member = self.image_member_repo_proxy.get(TENANT1)
        self.assertIsInstance(image_member, glance.notifier.ImageMemberProxy)
        self.assertEqual('image_member_from_get', image_member.repo)

    def test_image_member_list(self):
        image_members = self.image_member_repo_proxy.list()
        self.assertIsInstance(image_members[0],
                              glance.notifier.ImageMemberProxy)
        self.assertEqual('image_members_from_list', image_members[0].repo)
class TestTaskNotifications(utils.BaseTestCase):
    """Verify that task lifecycle operations emit the expected notifications.

    Each test drives the TaskRepoProxy/TaskProxy notifier wrappers around
    stub objects and inspects the entries captured by FakeNotifier.
    """
    def setUp(self):
        super(TestTaskNotifications, self).setUp()
        task_input = {"loc": "fake"}
        # Stub used for repo-level operations (add/remove).
        self.task_stub = TaskStub(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
            image_id='fake_image_id',
            user_id='fake_user',
            request_id='fake_request_id',
        )
        # Full task used for lifecycle operations (run/succeed/fail).
        self.task = Task(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
            task_input=task_input,
            result='res',
            message='blah',
            image_id='fake_image_id',
            user_id='fake_user',
            request_id='fake_request_id',
        )
        self.context = glance.context.RequestContext(
            tenant=TENANT2,
            user=USER1
        )
        self.task_repo_stub = TaskRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.task_repo_proxy = glance.notifier.TaskRepoProxy(
            self.task_repo_stub,
            self.context,
            self.notifier
        )
        self.task_proxy = glance.notifier.TaskProxy(
            self.task,
            self.context,
            self.notifier
        )
        self.task_stub_proxy = glance.notifier.TaskStubProxy(
            self.task_stub,
            self.context,
            self.notifier
        )
        # Pin timeutils.utcnow to a fixed instant so timestamp comparisons
        # (e.g. deleted_at in the delete test) are deterministic.
        self.patcher = mock.patch.object(timeutils, 'utcnow')
        mock_utcnow = self.patcher.start()
        mock_utcnow.return_value = datetime.datetime.utcnow()
    def tearDown(self):
        super(TestTaskNotifications, self).tearDown()
        self.patcher.stop()
    def test_task_create_notification(self):
        # Adding a task must emit one INFO-level task.create event.
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.create', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertTrue('image_id' not in output_log['payload'])
        self.assertTrue('user_id' not in output_log['payload'])
        self.assertTrue('request_id' not in output_log['payload'])
    def test_task_create_notification_disabled(self):
        # With task.create disabled, add() must emit nothing.
        self.config(disabled_notifications=['task.create'])
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_delete_notification(self):
        # With utcnow frozen in setUp, deleted_at must equal "now".
        now = timeutils.isotime()
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.delete', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        self.assertEqual(
            now,
            output_log['payload']['deleted_at']
        )
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertTrue('image_id' not in output_log['payload'])
        self.assertTrue('user_id' not in output_log['payload'])
        self.assertTrue('request_id' not in output_log['payload'])
    def test_task_delete_notification_disabled(self):
        # With task.delete disabled, remove() must emit nothing.
        self.config(disabled_notifications=['task.delete'])
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_run_notification(self):
        with mock.patch('glance.async_.TaskExecutor') as mock_executor:
            executor = mock_executor.return_value
            executor._run.return_value = mock.Mock()
            self.task_proxy.run(executor=mock_executor)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.run', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # NOTE(review): these checks test the attribute *values*
        # ('fake_image_id' etc.) as payload keys, unlike the string-literal
        # 'not in' checks used by the other tests -- confirm whether the
        # field names were intended here instead.
        self.assertFalse(
            self.task.image_id in output_log['payload']
        )
        self.assertFalse(
            self.task.user_id in output_log['payload']
        )
        self.assertFalse(
            self.task.request_id in output_log['payload']
        )
    def test_task_run_notification_disabled(self):
        # With task.run disabled, run() must emit nothing.
        self.config(disabled_notifications=['task.run'])
        with mock.patch('glance.async_.TaskExecutor') as mock_executor:
            executor = mock_executor.return_value
            executor._run.return_value = mock.Mock()
            self.task_proxy.run(executor=mock_executor)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_processing_notification(self):
        self.task_proxy.begin_processing()
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.processing', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertTrue('image_id' not in output_log['payload'])
        self.assertTrue('user_id' not in output_log['payload'])
        self.assertTrue('request_id' not in output_log['payload'])
    def test_task_processing_notification_disabled(self):
        self.config(disabled_notifications=['task.processing'])
        self.task_proxy.begin_processing()
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_success_notification(self):
        # begin_processing emits task.processing (index 0); succeed must
        # then emit task.success (index 1).
        self.task_proxy.begin_processing()
        self.task_proxy.succeed(result=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(2, len(output_logs))
        output_log = output_logs[1]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.success', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertTrue('image_id' not in output_log['payload'])
        self.assertTrue('user_id' not in output_log['payload'])
        self.assertTrue('request_id' not in output_log['payload'])
    def test_task_success_notification_disabled(self):
        self.config(disabled_notifications=['task.processing', 'task.success'])
        self.task_proxy.begin_processing()
        self.task_proxy.succeed(result=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_failure_notification(self):
        self.task_proxy.fail(message=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.failure', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertTrue('image_id' not in output_log['payload'])
        self.assertTrue('user_id' not in output_log['payload'])
        self.assertTrue('request_id' not in output_log['payload'])
    def test_task_failure_notification_disabled(self):
        self.config(disabled_notifications=['task.failure'])
        self.task_proxy.fail(message=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
| UTF-8 | Python | false | false | 34,473 | py | 602 | test_notifier.py | 268 | 0.623096 | 0.612914 | 0 | 840 | 40.039286 | 79 |
ffalconu/proyecto-leng-par | 11,398,843,215,062 | cf6ef40eb10fdcdacae78b5aaddbf610b21866e5 | 9910d135557d85030e6c497b5a939a0e3b233bf8 | /main.py | 10d5ca8228bb88439f20bae2100400028b7cde0c | []
| no_license | https://github.com/ffalconu/proyecto-leng-par | 1bfc47c0c99c0ca5c2ec4c20ba8265027cf7b80d | 92ecd7b2ca33af6feccd15aa174f8c0723b8f930 | refs/heads/master | 2022-11-17T05:59:55.991119 | 2020-07-11T06:07:46 | 2020-07-11T06:07:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from modules.airbnb_scraping import airbnb_scrape
from modules.utils import time_format, save_object_list
from modules.expedia_scraping import get_offers
import time
def main():
    """Scrape Airbnb and Expedia for one query, report timings, save results."""
    city = "Viña del Mar"
    checkin = "2020-07-17"
    checkout = "2020-07-19"
    rooms = 2
    adults = 2
    children = 2
    babies = 1
    hosting = []  # accumulates ALL results from every provider

    # --- Airbnb scraping ---
    started = time.time()
    airbnb_hosting_list = airbnb_scrape(city, checkin, checkout, rooms, adults, children, babies)
    airbnb_execution_time = time.time() - started
    hosting.extend(airbnb_hosting_list)

    # --- Expedia scraping ---
    started = time.time()
    expedia_hosting_list = get_offers(city, checkin, checkout, rooms, adults, children, babies)
    expedia_execution_time = time.time() - started
    hosting.extend(expedia_hosting_list)

    # --- print per-provider summaries ---
    print("\nairbnb scraping | "+time_format(airbnb_execution_time)+" | "+str(len(airbnb_hosting_list))+" resultados")
    print("\nexpedia scraping | "+time_format(expedia_execution_time)+" | "+str(len(expedia_hosting_list))+" resultados")

    # --- persist every collected listing (generated.txt) ---
    save_object_list(hosting)
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 1,337 | py | 2 | main.py | 2 | 0.664918 | 0.649925 | 0 | 39 | 33.205128 | 121 |
AdamC66/04---Programming-Fundamentals-Collections-and-Iteration-Part-3---Ranges | 3,307,124,868,119 | 8a85fe6cdfbaf93f8326e41b8cbf9a9af748cbb2 | 199f15782b8bc3eb4b822e7e4ab72e361a977acf | /ranges.py | 4098e1815b2c6d220382ac5db9124d3a2d4a8455 | []
| no_license | https://github.com/AdamC66/04---Programming-Fundamentals-Collections-and-Iteration-Part-3---Ranges | a1ecdf6372e3a9afa24b315aba1f21e46d2efd1e | 74b200a3592f6c405a722a467ade6e0f7c5b5970 | refs/heads/master | 2020-06-18T21:11:36.097040 | 2019-07-11T19:23:54 | 2019-07-11T19:23:54 | 196,449,668 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Exercise 11
# Let's do our own Bitmaker version of FizzBuzz, which is the name of a classic job interview coding problem.
# Write a program that loops over the numbers from 1 to 100. If the number is a multiple of three, output the string "Bit". For multiples of five, output "Maker". For numbers which are multiples of both three and five, output "BitMaker". Otherwise output the number itself.
# To solve this problem you will likely need to search the web. Start with the particular aspect of the question you are unsure of, such as "check if number is multiple of another python". Do use online resources, but do not read or copy an entire solution to the problem. Make sure the code you submit is your own. You will learn much more if you work through it yourself!
# As always, don't forget to commit your work as you make progress.
# Bitmaker FizzBuzz over 1..100: multiples of 3 print "Bit", multiples of
# 5 print "Maker", multiples of both print "BitMaker", otherwise the number.
# Fix: the spec above requires "BitMaker" (capital M); the old code printed
# "Bitmaker".
for i in range(1, 101):
    if i % 15 == 0:  # multiple of both 3 and 5
        print("BitMaker")
    elif i % 3 == 0:
        print("Bit")
    elif i % 5 == 0:
        print("Maker")
    else:
        print(i)
# Exercise 12
# PizzaMaker wants to handle bulk orders of pizzas, with varying amounts of toppings on each. Ask the user for a number of pizzas - call it quantity.
# We then want to ask the user for quantity more numbers - the number of toppings on that pizza - and print them out as in the following example.
# How many pizzas do you want to order?
# $ 3
# How many toppings for pizza 1?
# $ 5
# You have ordered a pizza with 5 toppings.
# How many toppings for pizza 2?
# $ 1
# You have ordered a pizza with 1 toppings.
# How many toppings for pizza 3?
# $ 4
# You have ordered a pizza with 4 toppings.
# You will need:
# to ask the user for input twice.
# a loop of some kind.
# to make sure your variables are what you think they are! Convert them to integers if needed.
# string interpolation
numpizzas = 0
pizzatoppings = []  # toppings count for each ordered pizza, in order
def get_input(inputstring):
    """Print *inputstring* and re-prompt until the user enters an integer."""
    # Fix: dropped the redundant ``variable``/``wait_for_input`` flags --
    # the early ``return`` already ends the loop.
    while True:
        print(inputstring)
        user_input = input()
        try:
            return int(user_input)
        except ValueError:
            print("Your input was invalid or unrecognized, please enter a number")
numpizzas = get_input("Please enter how many pizza\'s you would like to order")
for i in range(numpizzas):
    pizzatoppings.append(get_input("How many toppings for pizza # {}".format(i + 1)))
    # Fix: the exercise spec above shows "You have ordered ..." (capital Y).
    print("You have ordered a pizza with {} toppings\n".format(pizzatoppings[i]))
viniciustr/movelit | 14,379,550,541,990 | b4114553e965393e996b375adc2ce58dd0010812 | 8f8e78f3d27a393412b123c12378ef348e117a2c | /_updated/tests/test_page.py | 8eef6b8d390bbb9f11401f0e322c0b539cfb4c63 | [
"Apache-2.0"
]
| permissive | https://github.com/viniciustr/movelit | 88e31babaf860bf10c5fe671d360fe78637948b8 | 67808cfc39b268f3c7535d061ad5bfbeb9878572 | refs/heads/master | 2022-05-10T11:33:36.512408 | 2019-09-03T13:52:27 | 2019-09-03T13:52:27 | 154,015,638 | 2 | 0 | Apache-2.0 | false | 2022-03-29T21:55:36 | 2018-10-21T14:12:20 | 2021-06-25T21:00:38 | 2022-03-29T21:55:36 | 3,747 | 1 | 0 | 4 | Python | false | false | from .helpers import TestCase
class TestPage(TestCase):
    """Smoke test for the site index page."""
    def test_header(self):
        # The landing page body must contain the greeting text.
        response = self.client.get('/')
        assert "Hello world!" in response.data
| UTF-8 | Python | false | false | 160 | py | 19 | test_page.py | 9 | 0.6375 | 0.6375 | 0 | 7 | 21.857143 | 40 |
jizhihang/lda-3 | 19,095,424,625,928 | a2de0c152c966b6e10d4550a0182c43619cbd530 | 161f0f22ada648c75ab8e60c0c7f3323d9d89c97 | /data.py | 9bcc726e2e06cc8ac9dd4bae45cdaa520b69089e | []
| no_license | https://github.com/jizhihang/lda-3 | 63c2b5ac2f52f9c3fa886aa6df6d1777e22d8cee | 99fa8488f86aa5961bbfe8716857be2c7b03fac7 | refs/heads/master | 2021-01-01T17:52:55.225559 | 2015-12-14T04:46:33 | 2015-12-14T04:46:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Author : Soubhik Barari
This is an academic project completed for the course COMP 136 : Statistical Pattern Recognition
at Tufts University.
Datasets for evaluation.
"""
# NOTE: this module targets Python 2 (print statements below).
artificial = {}
# Documents 1 and 2 of the artificial corpus, tokenized on whitespace.
for i in range(1,3):
    with open("artificial/%i" % i, "r") as f:
        artificial[i] = f.read().split()
print "** read in `artifical' dataset"
data100 = {}
data100v = []  # vocabulary: every distinct token across documents 1..100
for i in range(1,101):
    with open("data100/%i" % i, "r") as f:
        data100[i] = f.read().split()
        data100v += data100[i]
# De-duplicate the vocabulary; token order is unspecified after set().
data100v = list(set(data100v))
print "** read in `data100' dataset"
data100class = {}
# index.csv rows are "doc,label": map each document id to its class label.
with open("data100/index.csv", "r") as f:
    line = f.readline()
    while line:
        doc = int(line.split(",")[0])
        label = int(line.split(",")[1])
        data100class[doc] = label
        line = f.readline()
print "** read in `data100' class labels"
| UTF-8 | Python | false | false | 813 | py | 7 | data.py | 4 | 0.642066 | 0.580566 | 0 | 39 | 19.846154 | 95 |
yuricampolongo/NanodegreeMachineLearning | 1,142,461,323,204 | 5bf1a2f30f4100578ab6e9c8ff464083a6f67266 | 4a4a617e0903750a701dc2afa71a34c3d5f88568 | /Supervised Learning - Boston Housing/boston_housing/boston_housing.py | 181ec0628b524d927bb8cc751b6e5de98057c1c2 | []
| no_license | https://github.com/yuricampolongo/NanodegreeMachineLearning | 6ec634c5bc3d224a651d05fe2a85e3ecc3ce6294 | 916a108cd9d586e953a161868a33664cb8ac6b8f | refs/heads/master | 2021-09-13T10:39:07.307324 | 2018-04-28T13:05:20 | 2018-04-28T13:05:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Importar as bibliotecas necessárias para este projeto
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.cross_validation import ShuffleSplit
# Formatação mais bonita para os notebooks
%matplotlib inline
# Executar o conjunto de dados de imóveis de Boston
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis=1)
# Êxito
print "O conjunto de dados de imóveis de Boston tem {} pontos com {} variáveis em cada.".format(*data.shape) | UTF-8 | Python | false | false | 568 | py | 11 | boston_housing.py | 3 | 0.748663 | 0.745098 | 0 | 19 | 28.578947 | 108 |
varecki/tredly | 15,418,932,635,973 | 67b3ae4faabb936d9d95bf97ee1a65a79341411d | 76d05ee6ef2dd37ede14550745d97d7182d9ad29 | /components/tredly-libs/python-common/objects/tredly/parser/tredlyfilefile/__init__.py | d9291b7f9dbdbaaa34f424712d4af579f23e88db | [
"MIT"
]
| permissive | https://github.com/varecki/tredly | 3256757c25e6972d201b1cc2718012601185577d | 74c14aab7c8933f91325698863f4b26bf5491c9e | refs/heads/master | 2021-01-18T09:32:12.678578 | 2016-06-23T05:28:36 | 2016-06-23T05:28:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import os
import json
from pprint import pprint
from objects.tredly.parser.TredlyParser import TredlyParser
from objects.tredly.tredlyfile import *
class TredlyfileParser(TredlyParser):
    # Action: Reads a Tredlyfile into a json object
    #
    # Pre:
    # Post: self.filePath (if valid path) has been read in as json
    #
    # Params:
    #
    # Return: json object
    def read(self):
        """Parse self.filePath (key=value lines) into the container dict.

        Stores the result in ``self.json`` as ``{'container': {...}}`` and
        returns it. Key handling order: ``funcs`` dispatch table, then the
        jsonMap.json rename map, then special persistent-storage keys, then
        urlN* keys, then a direct copy.
        """
        lines = []
        with open(self.filePath) as tredlyFile:
            for line in tredlyFile:
                line = line.strip().rstrip("\r\n")
                # ignore blank lines and those starting with # (comment)
                if line.startswith("#") or not len(line):
                    continue
                groups = re.match("^([A-z0-9]+)\s*=\s*(.*)$", line)
                if not groups:
                    continue
                command = groups.group(1)
                value = groups.group(2)
                lines.append([command, value])
        # jsonMap.json maps Tredlyfile key names to container key names.
        with open(os.path.join(os.path.dirname(__file__), "jsonMap.json")) as mapFile:
            _map = json.load(mapFile)
        # Skeleton of the output document; closures below mutate it in place.
        container = {
            'compatibleWith': None,
            'name': None,
            'replicate': False,
            'resourceLimits': {},
            'buildOptions':{},
            'customDNS': [],
            'proxy':{
                'layer4Proxy':False,
                'layer7Proxy':[]
            },
            'firewall':{
                "ipv4Whitelist": [],
                "allowPorts": {
                    "tcp": {
                        "in":[],
                        "out":[]
                    },
                    "udp": {
                        "in":[],
                        "out":[]
                    }
                }
            },
            'operations':{
                'onCreate': [],
                'onStart': [],
                'onStop': []
            },
            'technicalOptions':{}
        }
        # adds a key to the json object
        def addKey(key1, key2):
            if key1 not in container:
                container[key1] = {}
            def add(v):
                if (isinstance(v,str)):
                    if (len(v) == 0): return
                container[key1][key2] = v
            return add
        # Returns a setter that appends an operation entry of ``type`` to
        # container['operations'][key].
        def operations(key, type):
            def add(v):
                op = container['operations']
                adding = {'type':type}
                if (type == "fileFolderMapping"):
                    split = v.split(" ")
                    adding['source'] = split[0]
                    adding['target'] = split[1]
                else:
                    adding['value'] = v
                op[key].append(adding)
            return add
        # Returns a setter that appends an integer port to the firewall list;
        # non-int values are silently ignored.
        def allowPort(type, inOrOut):
            def add(v):
                if (v is not None):
                    ports = container['firewall']['allowPorts'][type][inOrOut]
                    if (isinstance(v,int)): ports.append(int(v))
            return add
        # add a key to the base directory
        def appendKey(key):
            keys = key.split(".");
            obj = container
            for key in keys:
                if key not in obj:
                    obj[key] = []
                obj = obj[key]
            def add(v):
                if (v is not None):
                    if (isinstance(v,str)):
                        if (len(v) == 0): return
                    obj.append(v)
            return add
        urls = {}
        # append url info
        # NOTE(review): the ``value`` parameter is never used inside addUrl;
        # the function reads the loop variable ``val`` from the enclosing
        # scope instead, and the call site passes ``key`` as ``value`` --
        # confirm this is intentional before changing.
        def addUrl(index,prop,value):
            if not prop: prop = 'url'
            # set up redirects
            if index not in urls:
                urls[index] = {
                    'cert': None,
                    'redirects': {}
                }
            # check if this is a redirect line
            isRedirect = re.match("^Redirect(\d+)(.*)", prop)
            if (isRedirect) and (val is not None):
                redirectIndex = isRedirect.group(1)
                redirectProp = isRedirect.group(2).lower() or "url"
                # create a dict if it doesnt already exist
                if (redirectIndex not in urls[index]['redirects']):
                    urls[index]['redirects'][redirectIndex] = {
                        "url": None,
                        "cert": None
                    }
                urls[index]['redirects'][redirectIndex][redirectProp] = val
                # if this attribute doesnt have a cert associated then set it to none
                if ('cert' not in urls[index]['redirects'][redirectIndex]):
                    urls[index]['redirects'][redirectIndex]['cert'] = None
            else:
                _prop = prop[0].lower() + prop[1::]
                if (_prop == "websocket"):
                    if (val):
                        _prop = "enableWebsocket"
                    else: return
                urls[index][_prop] = val
        # add technicaloptions
        # Parses "name=value"; technicalOptionsMap.json renames the option,
        # unknown options are dropped via the KeyError handler.
        def technicalOptions(value):
            with open(os.path.join(os.path.dirname(__file__), "technicalOptionsMap.json")) as techOptsMap:
                _map = json.load(techOptsMap)
            split = value.split('=')
            try:
                key = _map[split[0]]
                val = split[1]
                if (key == 'children.max' or key == 'securelevel' or key == 'devfs_ruleset'):
                    val = int(val)
                addKey('technicalOptions', key)(val)
            except KeyError:
                pass
        # Converts a GB figure to a "<n>M" megabyte string before storing.
        def resourceLimits(key):
            def add(value):
                # print(value)
                val = str(int(value) * 1024) + 'M'
                addKey('resourceLimits', key)(val)
            return add
        # set the layer 4 proxy value
        def layer4Proxy(value):
            container['proxy']['layer4Proxy'] = value
        # Dispatch table: Tredlyfile key -> setter closure.
        funcs = {
            'publish': addKey('buildOptions', 'publish'),
            'maxCpu': addKey('resourceLimits', 'maxCpu'),
            'maxHdd': resourceLimits('maxHdd'),
            'maxRam': resourceLimits('maxRam'),
            'layer4Proxy': layer4Proxy,
            'onStart': operations('onCreate','exec'),
            'installPackage': operations('onCreate','installPackage'),
            'technicalOptions': technicalOptions,
            'ipv4Whitelist': appendKey('firewall.ipv4Whitelist'),
            'customDNS': appendKey('customDNS'),
            'fileFolderMapping': operations('onCreate','fileFolderMapping'),
            'onStop': operations('onStop', 'exec'),
            'tcpInPort': allowPort('tcp', 'in'),
            'tcpOutPort': allowPort('tcp', 'out'),
            'udpInPort': allowPort('udp', 'in'),
            'udpOutPort': allowPort('udp', 'out'),
        }
        # loop over the lines
        for line in lines:
            key = line[0]
            val = line[1].strip().rstrip("\r\n")
            # check if this is a url
            isUrl = re.match("^url(\d+)(\w+)?", key)
            # convert yes/no to true/false
            if (isinstance(val,str)):
                if (len(val) ==0): continue
                # convert yes/no values to boolean
                if (val == "yes"):
                    val = True
                elif (val == "no"):
                    val = False
                elif (val.isdigit()): # and digits to int objects
                    val = int(val)
            # There is a mapping function
            if key in funcs:
                funcs[key](val)
            # There is a mapping
            elif key in _map:
                container[_map[key]] = val
            elif (key == "persistentStorageUUID"):
                # if the persistent storage object doesnt exist then create it
                if ('persistentStorage' not in container.keys()):
                    container['persistentStorage'] = {}
                container['persistentStorage']['identifier'] = val
            elif (key == "persistentMountPoint"):
                # if the persistent storage object doesnt exist then create it
                if ('persistentStorage' not in container.keys()):
                    container['persistentStorage'] = {}
                container['persistentStorage']['mountPoint'] = val
            elif isUrl:
                # add the url linked key/value
                addUrl(isUrl.group(1),isUrl.group(2),key)
            # Copy directly
            else:
                container[key] = val
        # loop over urls
        # Flatten each url's redirects dict into a list and attach the url
        # entry to the layer-7 proxy configuration.
        for i, url in urls.items():
            redirects = []
            for j, red in url['redirects'].items():
                redirects.append(red)
            url['redirects'] = redirects
            container['proxy']['layer7Proxy'].append(url)
        self.json = { 'container': container }
        return self.json
| UTF-8 | Python | false | false | 8,826 | py | 13 | __init__.py | 7 | 0.45332 | 0.448788 | 0 | 264 | 32.431818 | 106 |
acdh-oeaw/mmp | 7,653,631,750,992 | 7caeb80736edd722b34f7176f2db3284a778b7f9 | 5c929ec2036ff04dbaaa78a5939380095b1075b9 | /layers/api_views.py | 7a445124c0a2130a054bb4fb1e58a06bf8604f64 | [
"MIT"
]
| permissive | https://github.com/acdh-oeaw/mmp | 9c0535129446123e32ce5a104447b7432b4e7e05 | 5381aa97757c456d0ce4f68137f5287e57376629 | refs/heads/master | 2023-04-15T06:26:25.605304 | 2023-03-28T12:14:43 | 2023-03-28T12:14:43 | 334,097,604 | 3 | 0 | MIT | false | 2023-03-28T12:14:45 | 2021-01-29T09:32:19 | 2023-01-31T18:42:53 | 2023-03-28T12:14:43 | 3,541 | 4 | 0 | 10 | Python | false | false | import django_filters.rest_framework
from rest_framework import viewsets
from . models import GeoJsonLayer
from . api_serializer import GeoJsonLayerSerializer
class GeoJsonLayerViewSet(viewsets.ModelViewSet):
    """REST API endpoint for GeoJsonLayer objects.

    The queryset is de-duplicated with ``distinct()``; results can be
    filtered by ``use_case`` through DjangoFilterBackend query parameters.
    """
    queryset = GeoJsonLayer.objects.all().distinct()
    serializer_class = GeoJsonLayerSerializer
    filter_backends = [
        django_filters.rest_framework.DjangoFilterBackend,
    ]
    filterset_fields = ['use_case', ]
| UTF-8 | Python | false | false | 438 | py | 153 | api_views.py | 120 | 0.767123 | 0.767123 | 0 | 14 | 30.285714 | 58 |
anuxious/vir | 12,000,138,652,361 | feeb34fdb42f5372f6be339386fa6f62d182b667 | 7a2040e58f650f553fa59bee6987b34f0d307e38 | /main.py | aa60ba20db81898872dbb3ba7660ebc9b33ceac1 | []
| no_license | https://github.com/anuxious/vir | 6c8037c3c4bde1ee78e523a7e328a8ecf2b99e30 | 5a111f847eec71a0f3c33ada1030926de18abdea | refs/heads/main | 2023-07-15T22:40:51.116504 | 2021-08-24T07:24:40 | 2021-08-24T07:24:40 | 399,373,007 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import speech_recognition as sr
import pyttsx3
import webbrowser
from datetime import date, timedelta, datetime
import serial # used to communicate with Arduino board
import pyowm # used to tell the weather
from Keys import OPENWEATHER # Keys.py is where I store all my API keys SHANE will use
import operator # used for math operations
import random # will be used throughout for random response choices
import os # used to interact with the computer's directory
# Speech Recognition Constants
recognizer = sr.Recognizer()
microphone = sr.Microphone()
# Python Text-to-Speech (pyttsx3) Constants
engine = pyttsx3.init()
engine.setProperty('volume', 1.0)
# Wake word in Listen Function
WAKE = "Shane"
# Used to store user commands for analysis
CONVERSATION_LOG = "Conversation Log.txt"
# Initial analysis of words that would typically require a Google search.
# Maps each question word to itself; analyze() treats a command whose first
# word is a key here as a web search.
SEARCH_WORDS = {"who": "who", "what": "what", "when": "when", "where": "where", "why": "why", "how": "how"}
# Establish serial connection for arduino board.
# NOTE(review): 'com3' is a Windows-specific port name; LED doubles as a
# feature flag for all serial writes.
try:
    ser = serial.Serial('com3', 9600)
    LED = True
except serial.SerialException:
    print("LEDs are not connected. There will be no lighting support.")
    # If the LEDs aren't connected this will allow the program to skip the LED commands.
    LED = False
    pass
class Shane:
    def __init__(self):
        # Per-instance recognizer and microphone handles.
        # NOTE(review): the methods below use the module-level globals or
        # their own parameters instead, so these attributes appear unused --
        # confirm before removing.
        self.recognizer = sr.Recognizer()
        self.microphone = sr.Microphone()
# Used to hear the commands after the wake word has been said
def hear(self, recognizer, microphone, response):
try:
with microphone as source:
print("Waiting for command.")
recognizer.adjust_for_ambient_noise(source)
recognizer.dynamic_energy_threshold = 3000
# May reduce the time out in the future
audio = recognizer.listen(source, timeout=5.0)
command = recognizer.recognize_google(audio)
s.remember(command)
return command.lower()
except sr.WaitTimeoutError:
pass
except sr.UnknownValueError:
pass
except sr.RequestError:
print("Network error.")
# Used to speak to the user
    def speak(self, text):
        """Speak *text* aloud through the module-level pyttsx3 engine.

        runAndWait() blocks until the utterance has finished playing.
        """
        engine.say(text)
        engine.runAndWait()
# Used to open the browser or specific folders
def open_things(self, command):
# Will need to expand on "open" commands
if command == "open youtube":
s.speak("Opening YouTube.")
webbrowser.open("https://www.youtube.com/channel/UCW34Ghe9-_TCA5Vy3-Agfnw")
pass
elif command == "open facebook":
s.speak("Opening Facebook.")
webbrowser.open("https://www.facebook.com")
pass
elif command == "open my documents":
s.speak("Opening My Documents.")
os.startfile("C:/Users/Notebook/Documents")
pass
elif command == "open my downloads folder":
s.speak("Opening your downloads folder.")
os.startfile("C:/Users/Notebook/Downloads")
pass
else:
s.speak("I don't know how to open that yet.")
pass
# Used to track the date of the conversation, may need to add the time in the future
def start_conversation_log(self):
today = str(date.today())
today = today
with open(CONVERSATION_LOG, "a") as f:
f.write("Conversation started on: " + today + "\n")
# Writes each command from the user to the conversation log
def remember(self, command):
with open(CONVERSATION_LOG, "a") as f:
f.write("User: " + command + "\n")
# Used to answer time/date questions
def understand_time(self, command):
today = date.today()
now = datetime.now()
if "today" in command:
s.speak("Today is " + today.strftime("%B") + " " + today.strftime("%d") + ", " + today.strftime("%Y"))
elif command == "what time is it":
s.speak("It is " + now.strftime("%I") + now.strftime("%M") + now.strftime("%p") + ".")
elif "yesterday" in command:
date_intent = today - timedelta(days=1)
return date_intent
elif "this time last year" in command:
current_year = today.year
if current_year % 4 == 0:
days_in_current_year = 366
else:
days_in_current_year = 365
date_intent = today - timedelta(days=days_in_current_year)
return date_intent
elif "last week" in command:
date_intent = today - timedelta(days=7)
return date_intent
else:
pass
def get_weather(self, command):
home = 'Bossier City, Louisiana'
owm = pyowm.OWM(OPENWEATHER)
mgr = owm.weather_manager()
if "now" in command:
observation = mgr.weather_at_place(home)
w = observation.weather
temp = w.temperature('fahrenheit')
status = w.detailed_status
s.speak("It is currently " + str(int(temp['temp'])) + " degrees and " + status)
else:
print("I haven't programmed that yet.")
# If we're doing math, this will return the operand to do math with
def get_operator(self, op):
return {
'+': operator.add,
'-': operator.sub,
'x': operator.mul,
'divided': operator.__truediv__,
'Mod': operator.mod,
'mod': operator.mod,
'^': operator.xor,
}[op]
# We'll need a list to perform the math
def do_math(self, li):
# passes the second item in our list to get the built-in function operand
op = self.get_operator(li[1])
# changes the strings in the list to integers
int1, int2 = int(li[0]), int(li[2])
# this uses the operand from the get_operator function against the two intengers
result = op(int1, int2)
s.speak(str(int1) + " " + li[1] + " " + str(int2) + " equals " + str(result))
# Checks "what is" to see if we're doing math
def what_is_checker(self, command):
number_list = {"1", "2", "3", "4", "5", "6", "7", "8", "9"}
# First, we'll make a list a out of the string
li = list(command.split(" "))
# Then we'll delete the "what" and "is" from the list
del li[0:2]
if li[0] in number_list:
self.do_math(li)
elif "what is the date today" in command:
self.understand_time(command)
else:
self.use_search_words(command)
# Checks the first word in the command to determine if it's a search word
def use_search_words(self, command):
s.speak("Here is what I found.")
webbrowser.open("https://www.google.com/search?q={}".format(command))
# Analyzes the command
    def analyze(self, command):
        """Dispatch a recognized voice command to the matching handler.

        Falls through to a keyword web search, then a "don't know" reply.
        Handler errors are reported to the console but never crash the loop.
        """
        try:
            if command.startswith('open'):
                self.open_things(command)
            # USED ONLY FOR YOUTUBE PURPOSES
            # if command == "take over the world":
            #     s.speak("Skynet activated.")
            #     listening_byte = "T" # T matches the Arduino sketch code for the blinking red color
            #     ser.write(listening_byte.encode("ascii")) # encodes and sends the serial byte
            elif command == "introduce yourself":
                s.speak("I am Shane. I'm a digital assistant.")
            elif command == "what time is it":
                self.understand_time(command)
            elif command == "how are you":
                current_feelings = ["I'm okay.", "I'm doing well. Thank you.", "I am doing okay."]
                # selects a random choice of greetings
                greeting = random.choice(current_feelings)
                s.speak(greeting)
            elif "weather" in command:
                self.get_weather(command)
            elif "what is" in command:
                self.what_is_checker(command)
            # Keep this at the end: only reached when no specific phrase matched.
            # NOTE(review): SEARCH_WORDS looks like a dict mapping search
            # keywords to themselves, making this a membership test — confirm.
            elif SEARCH_WORDS.get(command.split(' ')[0]) == command.split(' ')[0]:
                self.use_search_words(command)
            else:
                s.speak("I don't know how to do that yet.")
            if LED:
                listening_byte = "H" # H matches the Arduino sketch code for the green color
                ser.write(listening_byte.encode("ascii")) # encodes and sends the serial byte
        except TypeError:
            print("Warning: You're getting a TypeError somewhere.")
            pass
        except AttributeError:
            print("Warning: You're getting an Attribute Error somewhere.")
            pass
# Used to listen for the wake word
    def listen(self, recognizer, microphone):
        """Block until the wake word is heard, then return it lowercased.

        Loops forever, silently retrying after timeouts, unintelligible
        audio, or non-wake phrases; only a wake-word match returns.
        """
        while True:
            try:
                with microphone as source:
                    print("Listening.")
                    # Re-calibrate for background noise before each attempt.
                    recognizer.adjust_for_ambient_noise(source)
                    recognizer.dynamic_energy_threshold = 3000
                    audio = recognizer.listen(source, timeout=5.0)
                    response = recognizer.recognize_google(audio)
                    if response == WAKE:
                        if LED:
                            listening_byte = "L" # L matches the Arduino sketch code for the blue color
                            ser.write(listening_byte.encode("ascii")) # encodes and sends the serial byte
                        s.speak("How can I help you?")
                        return response.lower()
                    else:
                        pass
            except sr.WaitTimeoutError:
                # No speech started within the 5 second window; try again.
                pass
            except sr.UnknownValueError:
                # Audio was captured but could not be transcribed.
                pass
            except sr.RequestError:
                print("Network error.")
    # Build the assistant and start logging the conversation.
    s = Shane()
    s.start_conversation_log()
    # Used to prevent people from asking the same thing over and over
    previous_response = ""
    while True:
        response = s.listen(recognizer, microphone)
        command = s.hear(recognizer, microphone, response)
        if command == previous_response:
            s.speak("You already asked that. Ask again if you want to do that again.")
            # NOTE(review): 'previous_command' is never read anywhere; this
            # assignment is dead and was probably meant to reset
            # 'previous_response' — confirm before changing.
            previous_command = ""
            response = s.listen(recognizer, microphone)
            command = s.hear(recognizer, microphone, response)
        s.analyze(command)
        previous_response = command
| UTF-8 | Python | false | false | 10,450 | py | 2 | main.py | 2 | 0.574163 | 0.568325 | 0 | 288 | 35.284722 | 114 |
NateWeiler/Resources | 6,081,673,738,620 | 0fed99666bb09a6abe6bfa5f0fce68f7e1df2591 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Cheese Boys/cheeseboys/level/__init__.py | f5f81e1579d9083bda123e15b360e74741406281 | []
| no_license | https://github.com/NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | false | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | 2021-12-09T12:39:04 | 2022-09-08T15:20:18 | 2,434,051 | 1 | 0 | 32 | null | false | false | version https://git-lfs.github.com/spec/v1
oid sha256:fa67e7fcc4a015be644cf9728522d05bdddea4dc92dddadc6fa6067d00dee242
size 138
| UTF-8 | Python | false | false | 128 | py | 36,207 | __init__.py | 16,386 | 0.882813 | 0.578125 | 0 | 3 | 41.666667 | 75 |
Kludex/stream-csv | 4,097,398,814,647 | 1537477c5b5cfe956042582d6b284e3ee7ef50ec | 86924bebdf76420fcf481f55ce3ecd9b8985d2d4 | /stream_csv/sqlalchemy.py | 954fd441f103362adb87cee6dd017010638455f0 | [
"MIT"
]
| permissive | https://github.com/Kludex/stream-csv | ba8b04773b78938be35f6e41e50b772b07d5113d | 44c737be9ae8b21254be253e4772618f465d3740 | refs/heads/master | 2023-02-17T03:52:30.339897 | 2021-01-17T15:47:49 | 2021-01-17T15:47:49 | 330,420,967 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List, TypeVar
from stream_csv.stream import stream_data
sqlalchemy_error_message = (
"This module requires sqlalchemy, please install it with\n\n"
"pip install sqlalchemy"
)
starlette_error_message = (
"This module requires starlette, please install it with:\n\n"
"pip install starlette"
)
try:
from starlette.background import BackgroundTask
from starlette.responses import StreamingResponse
except ImportError:
raise RuntimeError(starlette_error_message)
try:
from sqlalchemy import inspect
except ImportError:
raise RuntimeError(sqlalchemy_error_message)
Model = TypeVar("Model", bound="Base")
class StreamingCSVResponse(StreamingResponse):
    """Starlette response that streams a list of ORM rows as CSV.

    Column names are introspected from the first row's SQLAlchemy mapper,
    so every row is expected to be an instance of the same mapped class.
    """

    def __init__(
        self,
        content: List[Model],
        status_code: int = 200,
        headers: dict = None,
        media_type: str = None,
        background: BackgroundTask = None,
    ) -> None:
        # Guard the empty case: indexing content[0] on an empty list raised
        # IndexError before the response could even be constructed.
        columns = (
            [c.key for c in inspect(type(content[0])).mapper.column_attrs]
            if content
            else []
        )
        generator = stream_data(
            [{column: getattr(row, column) for column in columns} for row in content]
        )
        super().__init__(
            content=generator,
            status_code=status_code,
            headers=headers,
            media_type=media_type,
            background=background,
        )
| UTF-8 | Python | false | false | 1,335 | py | 6 | sqlalchemy.py | 5 | 0.652434 | 0.649438 | 0 | 49 | 26.244898 | 85 |
xingchengxia/snacks | 5,849,745,460,252 | b2c551fed16e82755e88abfe78b470ab66403e20 | a287c6173452913a552304849ba6bfa53e2aa289 | /spider/selenium_webdriver.py | 383991e184f36464bf32cce4d645e1837b723789 | []
| no_license | https://github.com/xingchengxia/snacks | aa94eb06f70f3c96b4b77e50405cad54693615f3 | 4f4b695f4e238ea3b497967c895926ae50327a09 | refs/heads/master | 2017-05-12T02:33:00.758753 | 2017-05-12T01:12:38 | 2017-05-12T01:12:38 | 82,787,596 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
import time
import selenium.webdriver.chrome.service as service
def service(path, url="https://www.sogou.com/"):
    """Launch Chrome through an explicitly managed ChromeDriver service and visit *url*.

    path: filesystem path to the chromedriver binary.
    url:  page to load once the browser is up (new optional parameter; the
          original body read an undefined global ``url`` and raised NameError).

    The original also assigned a local named ``service``, which shadowed both
    this function and the module alias, so ``service.Service(path)`` raised
    UnboundLocalError and the function could never run.
    """
    # Local import sidesteps the name collision: the module-level alias
    # ``service`` is rebound to this function by the ``def`` statement.
    import selenium.webdriver.chrome.service as chrome_service
    driver_service = chrome_service.Service(path)
    driver_service.start()
    capabilities = {'chrome.binary': '/path/to/custom/chrome'}
    driver = webdriver.Remote(driver_service.service_url, capabilities)
    driver.get(url)
    time.sleep(5)
    driver.quit()
def main():
    """Drive Chrome to sogou.com, search for "Lamy 2000", then quit."""
    driver_path = '/Users/lannister/workspace/chromedriver'
    start_url = "https://www.sogou.com/"
    driver = webdriver.Chrome(driver_path)
    driver.get(start_url)
    time.sleep(5)
    # Fill the search box and submit its surrounding form.
    search_box = driver.find_element_by_id("query")
    search_box.send_keys("Lamy 2000")
    search_box.submit()
    time.sleep(5)
    driver.quit()


main()
| UTF-8 | Python | false | false | 748 | py | 13 | selenium_webdriver.py | 13 | 0.719251 | 0.709893 | 0 | 28 | 25.714286 | 63 |
thinkopensolutions/tko-l10n_br | 18,021,682,806,266 | 1b9319849e90ff51b2f02399613fd325a808b6a4 | 2085b049402997977d0a107a886fcaf210e0be7f | /tko_br_cnab/wizard/mail_sent_info.py | 20818c86f48c9984b61dd86bc6355cd7e678e227 | []
| no_license | https://github.com/thinkopensolutions/tko-l10n_br | 705d2f0f95a5e63bc91dfb27a0d05d6476a8973e | 54bf1e5c1ffab1fbeebccef142cc81735aeac700 | refs/heads/11.0 | 2021-01-23T01:01:28.739259 | 2018-12-11T20:19:30 | 2018-12-11T20:19:30 | 85,862,448 | 0 | 0 | null | false | 2018-12-11T20:19:31 | 2017-03-22T18:27:48 | 2018-07-10T17:26:36 | 2018-12-11T20:19:30 | 275 | 0 | 0 | 0 | Python | false | null | # -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
class MailSentInfo(models.TransientModel):
    # Transient (wizard) model that shows a mail-sent message in a dialog.
    _name = 'mail.sent.info'
    # HTML body displayed to the user; read-only in the wizard view.
    message = fields.Html(readonly =True)
    def message_ok(self):
        # Returning True from the button handler closes the wizard dialog.
        return True
| UTF-8 | Python | false | false | 329 | py | 21 | mail_sent_info.py | 12 | 0.652439 | 0.631098 | 0 | 15 | 20.866667 | 63 |
Baidoolot/parlament | 10,196,252,385,734 | bf5bc5b3880d5f761ed4690b9d3928e6cf14542f | 5e89cc3cd83119741f7f054157736a38a2d96400 | /app/view.py | 0707be3b890a79db6788b651c0a1dc837b9b0587 | []
| no_license | https://github.com/Baidoolot/parlament | 0609f4ecc91cea997dcda79e9e20e2a9f0041c79 | 84a7becf56450badfa6ae2167fefb49f56035895 | refs/heads/master | 2022-11-14T07:40:46.894642 | 2020-07-08T12:01:34 | 2020-07-08T12:01:34 | 278,076,866 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import render_template
from flask import request, redirect, url_for
from flask_security import login_required
from flask_login import logout_user
from app import app, db
from admin import University
@app.route("/logout")
def logout():
    """Log the current user out and send them back to the index page."""
    logout_user()
    # url_for builds the target from the view name. The literal
    # redirect('index') emitted a relative Location that resolved to
    # /index (no such route) instead of the index view at /.
    return redirect(url_for('index'))
@app.route('/')
@login_required
def index():
    """Render the index page listing every University record."""
    universities = db.session.query(University).all()
    # The template receives the list under the (historical) name 'n'.
    return render_template('index.html', n=universities)
@app.route('/detail/<int:pk>/')
@login_required
def detail(pk):
    """Render the detail page for the University with primary key *pk*."""
    university = (
        db.session.query(University)
        .filter(University.id == pk)
        .first()
    )
    return render_template('detail.html', univer=university)
| UTF-8 | Python | false | false | 641 | py | 4 | view.py | 2 | 0.714509 | 0.714509 | 0 | 28 | 21.857143 | 75 |
R-LoveIV/Classification | 764,504,209,114 | 2147314b27e8f59863d117e6ea8f01493915ac59 | 4e595dd58e803b3109e84a584863711e711675d5 | /Classification Algorithms.py | f975ae6a327ab4f32f28cca4f76b49b232b0b0a5 | [
"Unlicense"
]
| permissive | https://github.com/R-LoveIV/Classification | a84337e47816c3c5284bb3266ea98b5fbcceb135 | 5e7c3401de1c32e6e8a50d1e89f07a843dc677f7 | refs/heads/main | 2023-07-19T09:51:42.835033 | 2021-09-19T03:56:21 | 2021-09-19T03:56:21 | 408,025,618 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from sklearn.datasets import fetch_openml
# In[2]:
#load the Dataset
mnist = fetch_openml('mnist_784')
# In[3]:
#Explode the data set
mnist
# In[4]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[5]:
import matplotlib
import matplotlib.pyplot as plt
# In[6]:
#seperate into two varriables
x, y = mnist['data'], mnist['target']
# In[15]:
random_digit = x[3600]
some_random_digit = random_digit.reshape(28,28)
plt.imshow(some_random_digit,cmap=matplotlib.cm.binary, interpolation="nearest")
# In[18]:
x_train, x_test = x[:6000] , x[6000:7000]
y_train , y_test = y[:6000], y[6000:7000]
# In[9]:
import numpy as np
shuffle_index = np.random.permutation(6000)
x_train , y_train = x_train[shuffle_index], y_train[shuffle_index]
# In[23]:
y_train_2 = y_train.astype(np.int8)
y_test_2 = y_test.astype(np.int8)
y_train_3 = (y_train == 2)
y_test_3 = (y_test == 2)
# In[25]:
y_test_2
# In[26]:
from sklearn.linear_model import LogisticRegression
# In[27]:
clf = LogisticRegression(tol=0.1)
# In[28]:
clf.fit(x_train,y_train_2)
# In[29]:
y_pred = clf.predict([random_digit])
# In[30]:
y_pred
# In[31]:
from sklearn.model_selection import cross_val_score
# In[32]:
a = cross_val_score(clf,x_train,y_train_2,cv=3,scoring="accuracy")
# In[33]:
a.mean()
# In[ ]:
| UTF-8 | Python | false | false | 1,387 | py | 1 | Classification Algorithms.py | 1 | 0.638789 | 0.577505 | 0 | 131 | 9.557252 | 80 |
arenAghajanyan/Intro-to-Python | 395,137,007,650 | 9a540b80b7edc74dbd6f80177493ef1c17ea18df | 627cea608d05433ca1505f9e787233941d069b6a | /week4/Practical/Problem12.py | f3f9928557f78e42ba2592f46cf9e57b5ff47704 | []
| no_license | https://github.com/arenAghajanyan/Intro-to-Python | 5a25a9ac2f1172b0dd7a3f208c7fd29f5d5ef916 | 486aefac1cb0c2562d4039555c6e900a2c900a55 | refs/heads/master | 2020-07-06T09:22:52.374910 | 2019-10-04T11:17:00 | 2019-10-04T11:17:00 | 202,969,738 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | list1=[1,2,3,4,5,134,234,345,567,2345,20,21]
list2=[x for x in list1 if x>20]
print(list1)
print(list2) | UTF-8 | Python | false | false | 103 | py | 77 | Problem12.py | 73 | 0.699029 | 0.38835 | 0 | 4 | 25 | 44 |
Thesys-lab/InMemoryCachingWorkloadAnalysis | 14,929,306,322,128 | bab11294c73363a13365891aba921c1b83f042e5 | 56b81a46107acebb24be67368b4fb17d440f423a | /cacheTraceAnalysis/core/req.py | ffa9e4db6d81b911ac585f1e6e4db06aa0bc37ef | [
"Apache-2.0"
]
| permissive | https://github.com/Thesys-lab/InMemoryCachingWorkloadAnalysis | 451dec27eb7fbb9bdf4bd1258e3c0690f029464e | 5f6f9f7e29a164478f3fc28eb64c170bbbafdec7 | refs/heads/master | 2023-01-04T09:55:32.459308 | 2020-09-30T15:50:21 | 2020-09-30T15:50:21 | 291,110,974 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import inspect
import collections
try:
    from dataclasses import dataclass

    @dataclass
    class Req:
        """A single trace request record.

        Numeric fields default to -1 and string fields to "" meaning
        "not present in the trace"; cnt defaults to 1.
        """
        logical_time: int
        obj_id: int
        real_time: int = -1
        obj_size: int = -1
        req_size: int = -1
        key_size: int = -1
        value_size: int = -1
        req_range_start: int = -1
        req_range_end: int = -1
        cnt: int = 1
        op: str = ""
        ttl: int = -1
        other: str = ""

        def ss(self):
            # Debug helper: dump the local namespace of this call.
            print("locals {}".format(locals().items()))

        def __str__(self):
            return self.__repr__()

        def __repr__(self):
            # Show only fields that differ from their "unset" sentinels
            # (-1 / empty string) to keep trace dumps readable.
            members = inspect.getmembers(self, lambda a: not(inspect.isroutine(a)))
            members = [a for a in members if not(
                a[0].startswith('__') and a[0].endswith('__'))]
            s = f"Req("
            for member in members:
                if member[1] != -1 and member[1] != "":
                    s += "{}={},\t".format(*member)
            s = s[:-1] + ")"
            return s
except ImportError:
    # Pre-3.7 fallback (dataclasses unavailable). The bare "except:" this
    # replaces would also have hidden genuine errors raised while building
    # the dataclass above.
    # NOTE(review): the fallback field list (client_id/namespace, no
    # "other") does not match the dataclass schema — confirm which set of
    # fields callers rely on.
    Req = collections.namedtuple('Req', ["logical_time", "obj_id", "real_time", "obj_size", "req_size",
                                         "key_size", "value_size", "req_range_start", "req_range_end", "cnt", "op", "ttl", "client_id", "namespace"])
    Req.__new__.__defaults__ = (None, None, None, 1, 1, 0, 1, None, None, 1, None, None, None, None)
| UTF-8 | Python | false | false | 1,339 | py | 46 | req.py | 37 | 0.520538 | 0.505601 | 0 | 47 | 27.319149 | 114 |
shiv379/Python_ROT | 3,143,916,102,942 | 1d0ac1bb52d777a09d2b4023b060eebccd6fae01 | b674b9496501867e6cc7c4e47f215ec3ac6b577b | /rot_tools.py | 432a445c5f703ea5f9f5cce577c39922474c0247 | []
| no_license | https://github.com/shiv379/Python_ROT | 750582affff78d53626287589bf3fbc0f4f7d4af | 26542d37262260107f9c1fa239e2a33085d91f27 | refs/heads/master | 2021-05-05T19:49:46.035172 | 2017-09-24T21:43:23 | 2017-09-24T21:43:23 | 103,897,426 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sc_encode
from getkey import getKey
def menu():
    """Interactive top-level loop for the ROT decoder.

    Clears the screen, shows the available options (the decoding options
    only appear once some input text has been entered), and acts on
    single-keypress selections until '9' (Exit) is chosen.
    """
    global plain_text
    while True:
        # Note: this local dict shadows the function name inside the loop.
        menu = {}
        menu['1']="Enter text"
        menu['9']="Exit"
        if plain_text is not None:
            menu['2']="Fixed shift"
            menu['3']="Brute force"
            menu['4']="Output to file: OFF"
        os.system('clear')
        options=sorted(menu.keys())
        print('{:*^40}'.format('ROT Decoder'),"\n\n")
        print("Input text: ",plain_text,"\n\n")
        for entry in options:
            print(entry, menu[entry])
        print("")
        selection=getKey()
        if selection not in options:
            print()
        elif selection =='1': #Enter text
            plain_text = None
            while plain_text is None:
                plain_text = input("\nEnter message to encode: ").strip()
        elif selection == '2': #Fixed shift
            shift = None
            # Keep prompting until a valid integer shift is entered.
            while not isinstance(shift, int):
                try:
                    shift=int(input("Shift: "))
                except ValueError:
                    print("Please enter a valid whole number.")
            print("\n", sc_encode.decode(plain_text, shift))
            getKey()
        elif selection == '3': #Brute force
            os.system('clear')
            sc_encode.brute_force(plain_text)
        elif selection == '4': #File out
            print("Not implemented yet\nPress any key")
            getKey()
        elif selection == '9': #Exit
            os.system('clear')
            break
# No input yet; decoding options stay hidden until text is entered.
plain_text = None
menu()
sailfish009/IsoTensor | 16,312,285,801,919 | af1d39d4e00863f688719652200860b17061958d | acf7422d7e30b4fab49870a6db60952799edc52b | /layer/__init__.py | 3afac6f8d7622f954cd83758568151e296ed4f8a | [
"MIT"
]
| permissive | https://github.com/sailfish009/IsoTensor | 361832b3ccd47f2cf190c960594b6df5927284c7 | aa797678fc16071667dd0255c42bf0b925ad09dc | refs/heads/master | 2023-08-15T10:06:14.601510 | 2021-10-05T05:06:39 | 2021-10-05T05:06:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import MERAlayer
from . import TNRlayer
| UTF-8 | Python | false | false | 48 | py | 28 | __init__.py | 27 | 0.770833 | 0.770833 | 0 | 2 | 22.5 | 23 |
Gujing-Ace/machine_learning_book | 7,980,049,267,514 | 9c1b4e33f2bb0ef3843214ab9091b3230e45c231 | 9124994ae08544ac56341df9cce30e35959c7d3f | /第五章/KNN_with_Kfold.py | 5ef517f8927f7b0e35b09ff9c66f353d0d7b7717 | []
| no_license | https://github.com/Gujing-Ace/machine_learning_book | cdc654c072db29ab72e3ae1f52f3d51d40d6289a | 4ea937e9260336960ce7881f47aba694626e0ebc | refs/heads/master | 2023-03-16T17:10:54.306612 | 2020-07-17T01:21:42 | 2020-07-17T01:21:42 | 562,814,892 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
@Author: Runsen
@微信公众号: 润森笔记
@博客: https://blog.csdn.net/weixin_44510615
@Date: 2020/4/19
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
#读取鸢尾花数据集
iris = load_iris()
x = iris.data
y = iris.target
k_range = range(1, 20)
k_score = []
#循环,取k=1到k=20,查看正确率
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
#cv参数决定数据集划分比例,这里是按照4:1划分训练集和测试集
scores = cross_val_score(knn, x, y, cv=5, scoring='accuracy')
k_score.append(np.around(scores.mean(),3))
#画图,x轴为k值,y值为正确率
plt.plot(k_range, k_score)
plt.xticks(np.linspace(1,20,20))
plt.xlabel('Value of K for KNN')
plt.ylabel('score')
plt.show()
print(k_score)
print("最终的最佳K值:{}".format(int(k_score.index(max(k_score))) + 1 ))
print("最终最佳准确率:{}".format(max(k_score)))
| UTF-8 | Python | false | false | 1,056 | py | 81 | KNN_with_Kfold.py | 75 | 0.713801 | 0.678733 | 0 | 32 | 26.59375 | 65 |
rcitterio/brownie | 9,646,496,568,371 | a0d8c666461012a2a364d46a7c4e7bd556d25181 | f8442b014a1f4a15d72796ae28d4eeac6b408fcb | /tests/brownie-test-project/scripts/token.py | 81cc3210db6684ca5d92b65a018747f1b2180e00 | [
"MIT"
]
| permissive | https://github.com/rcitterio/brownie | fc67131e1d086d4f7a496a6ea56f3fbac72655ec | f111d299974f11e451b0acd790e12b40986659e4 | refs/heads/master | 2022-02-24T12:10:40.008239 | 2019-09-30T10:46:03 | 2019-09-30T10:46:03 | 212,529,997 | 1 | 0 | MIT | true | 2019-10-03T08:23:58 | 2019-10-03T08:23:55 | 2019-10-03T08:22:19 | 2019-10-02T09:24:43 | 3,347 | 0 | 0 | 0 | null | false | false | #!/usr/bin/python3
from brownie import *
def main():
    # Deploy the BrownieTester contract from the first test account,
    # passing True to its constructor.
    accounts[0].deploy(BrownieTester, True)
def donothing(a):
    """Identity helper: return *a* unchanged."""
    return a
| UTF-8 | Python | false | false | 133 | py | 51 | token.py | 26 | 0.669173 | 0.654135 | 0 | 11 | 11.090909 | 43 |
MarceloCFSF/Naja | 4,758,823,795,733 | 66b783bc16d85a0e1781085a29abc022da27d409 | c47e4c82a68563dbb5828dae8e9b1a3598297b7c | /main.py | eec4c8eadd6a1164e55a4e26a495c11ea53eb405 | []
| no_license | https://github.com/MarceloCFSF/Naja | b0f28afc1a1feae7339d916a2b11189e6be0290a | edc38d5bd02afe840ea2ad006491e0d950191818 | refs/heads/master | 2023-07-11T15:06:06.850798 | 2021-08-14T05:17:09 | 2021-08-14T05:17:09 | 395,882,114 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from antlr4 import *
from NajaLexer import NajaLexer
from NajaParser import NajaParser
from rewriter import RewriteListener
def main(argv):
    """Parse the Naja source file named in argv[1] and run the rewriter over it."""
    source = FileStream(argv[1])
    token_stream = CommonTokenStream(NajaLexer(source))
    parser = NajaParser(token_stream)
    parse_tree = parser.prog()
    # Walk the tree, letting RewriteListener react to each rule.
    ParseTreeWalker().walk(RewriteListener(), parse_tree)
    print("Compilado com sucesso")

if __name__ == '__main__':
    main(sys.argv)
| UTF-8 | Python | false | false | 463 | py | 8 | main.py | 6 | 0.701944 | 0.697624 | 0 | 19 | 23.368421 | 40 |
almightynerve/prog12 | 6,442,450,946,408 | 6e9642567ddec3a977f3ec8a8229b73edb3df88d | bcdf8000d51601e44722120bfba50e22554ca181 | /MADLIP1- Garsha Iravani12.py | 553fe264950744de41b70e9775a672cc383b2402 | []
| no_license | https://github.com/almightynerve/prog12 | 7144f7e472771b66b54b7d58375f1b308b1c52c2 | 4f7f476e21adab228b822fbba6af503ecfaac12b | refs/heads/master | 2020-07-27T03:33:28.764767 | 2019-11-26T17:55:27 | 2019-11-26T17:55:27 | 208,853,241 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("welcome to the most meaningless MADLIP of all time")
print("to play the game, first you must be creative and put some AWESOME WORDS!!!!")
holiday= input("a kind of holiday:")
noun1= input("type a noun:")
place= input("a place:")
person= input(" a person:")
adjective1= input("type an adjective:")
bodypart= input(" a plural word from body parts:")
verb= input(" type a verb:")
adjective2= input(" another type of adjective:")
noun2= input("type another noun:")
food= input(" type a kind of food:")
pluralnoun= input("type a plural noun:")
print("I can't believe it's already " + holiday)
print("I can't wait to put on my " + noun1)
print("and visit every" + place)
print("in my neighbourhood.")
print("This year i'm going to dress as " + person)
print("with " + adjective1)
print(bodypart)
print(" before i " + verb)
print(" i make sure to grab my " + adjective2)
print(noun2)
print("to hold all of my " + food)
print("finally,all of my " + pluralnoun + " are ready to go")
comment = input("Rate the game from 1 to 10 ")
print(comment)
print("created by GI")
| UTF-8 | Python | false | false | 1,100 | py | 5 | MADLIP1- Garsha Iravani12.py | 5 | 0.676364 | 0.666364 | 0 | 28 | 37.142857 | 84 |
khimacademy/c104 | 1,391,569,426,480 | 2bec43543042828c159484ff61bcbcddc769c769 | d1c53def818f9c7e1bd660e3303a754f297aff43 | /code/ch6/7_4_b.py | eb97c6c6382b46fd3d0cdcd8e5121fe750820cad | []
| no_license | https://github.com/khimacademy/c104 | dcdcae13499e5b68905f09ea009e1a2b9f552e1c | 83443858d5b85c23c107fa09cd672d17549776ee | refs/heads/master | 2020-03-26T10:57:52.536935 | 2018-08-25T06:17:04 | 2018-08-25T06:17:04 | 144,822,712 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
7-4 피자 토핑
사용자가 'quit' 값을 입력할 때까지 계속해서 어떤 피자 토핑을 추가할지 물어보는 루프를 만드세요. 사용자가 토핑을 입력하면 그 토핑을 피자에 추가하겠다는 메시지를 출력하세요.
Output:
What topping would you like on your pizza?
Enter 'quit' when you are finished: pepperoni
I'll add pepperoni to your pizza.
What topping would you like on your pizza?
Enter 'quit' when you are finished: sausage
I'll add sausage to your pizza.
What topping would you like on your pizza?
Enter 'quit' when you are finished: bacon
I'll add bacon to your pizza.
What topping would you like on your pizza?
Enter 'quit' when you are finished: quit
'''
| UTF-8 | Python | false | false | 729 | py | 80 | 7_4_b.py | 80 | 0.739206 | 0.735751 | 0 | 20 | 27.9 | 100 |
chihunmanse/LUSH-motive-Project | 17,317,308,155,789 | 5266a7eb50796152cf8c5eb81bb7ad091d7754db | 6c6718a151d0f207b93e639485f3b889b223e328 | /carts/models.py | ec16552e92b809831ab04a225f8ef2d728a302c1 | []
| no_license | https://github.com/chihunmanse/LUSH-motive-Project | f30be5b9e664e556301fcff529ba83c1f1d69524 | d8c751b52144e0f1c889b492d3914121df9efaf5 | refs/heads/main | 2023-08-11T04:06:53.521464 | 2021-10-16T13:42:54 | 2021-10-16T13:42:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
class Cart(models.Model) :
    # One cart line: a user holding some quantity of one product option.
    user = models.ForeignKey('users.User', on_delete = models.CASCADE)
    option = models.ForeignKey('products.Option', on_delete = models.CASCADE)
    quantity = models.PositiveIntegerField(default = 0)
    class Meta :
        db_table = 'carts'
sunyanhui/autotest | 11,003,706,239,668 | 3bb7fa0a8fc21558702a8f260795a19b8e4702c0 | 89362bd60333e20a7cfc322751faaea1bb49ae78 | /veeker/testsuite/olsm/test_enterprise_modify_info.py | f8ba572a135506090bc189e8fe9dd591f329c193 | []
| no_license | https://github.com/sunyanhui/autotest | 85548bfdd24c4e19466f25ab94318d7e8e52b8b0 | b8acaceddae3491066b1d864ab734a213e8aa1d1 | refs/heads/master | 2016-09-05T09:16:20.776636 | 2015-03-13T07:01:46 | 2015-03-13T07:01:46 | 23,528,191 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from common import config
from testdata.olsm.data_enterprise_modify_info import *
from action.action_login import Login
from action.enterprise.action_enterprise_modify_info import EnterpriseModifyInfo
import unittest
import logging
import time
class TestEnterpriseModifyInfo(unittest.TestCase):
u'''
测试超市修改基本信息
'''
def setUp(self):
self.login = Login()
self.enterpriseModifyInfo = EnterpriseModifyInfo()
def tearDown(self):
time.sleep(1)
self.login.quit()
def test_modify_info_case1(self):
u'''测试超市版注册功能,注册成功后,使用新注册的账号登录'''
self.assertTrue(self.login.open_browser(config.BASE_URL),u"打开首页失败")
r = self.login.login(**test_modify_info_case1)
self.assertTrue(r.result, r.msg)
r = self.enterpriseModifyInfo.modify_info(**test_modify_info_case1)
self.assertTrue(r.result, r.msg)
if __name__ == '__main__':
#logging.basicConfig(level=logging.DEBUG)
a = unittest.TestSuite()
a.addTests(unittest.makeSuite(TestEnterpriseModifyInfo))
b = unittest.TextTestRunner()
b.run(a)
#unittest.main() | UTF-8 | Python | false | false | 1,243 | py | 200 | test_enterprise_modify_info.py | 199 | 0.684211 | 0.678171 | 0 | 40 | 28 | 80 |
yuyanf/skolePython | 3,917,010,176,859 | 73fdbbe171783079a253c9f970f9fe2d87927118 | ad305960008e8a29bab8a3fb8407d9d4e3db29e0 | /skop.py | 3e4569100ab434c316f7046e9c096ef001f6724a | []
| no_license | https://github.com/yuyanf/skolePython | a03a90de36a824edc0a252191ae417b7f18eb18e | 3f6921252026ad7b8e4eab4d89a6929b0a65bf96 | refs/heads/main | 2023-03-25T12:19:27.974294 | 2021-02-20T20:24:07 | 2021-02-20T20:24:07 | 340,743,727 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Prints a Norwegian walkthrough of a scope/NameError exercise. The text is
# runtime output, so it is reproduced verbatim below.
print('''
Først definerer vi funksjon minFunkjson som ikke tar imot noen parametre. Deretter definerer vi funksjon hovedprogram som tar heller ikke imot noen parametre. Deretter kalles vi hovedprogram.
Inni hovedprogram opprettes en variabel a med verdi 42, og en variabel b med verdi 0, og skriver ut b til terminalen. Deretter settes vi a sin verdi inni b, altså b har nå verdi 42. Så kalles vi minFunksjon.
Inni minFunksjon oppretter vi en for-løkke med range 2, dvs. det looper to runder når x er 0 og x er 1.
Første runde når x er 0:
Opprettes en variabel c med verdi 2 og skriver ut c til terminalen. Deretter setter vi c er lik c pluss 1 som gir da en verdi 3. Deretter oppretter vi variabel b med verdi 10, og b er lik b pluss a. Deretter skriver ut b til terminalen.
Andre runde når x er 1:
Variabel c settes til verdi 2 og skriver ut til terminalen. Deretter setter vi c er lik c pluss 1 som gir en verdi 3. b settes til verdi 10, og b er lik b pluss a. Deretter skriver ut b til terminalen. For-løkka stopper å loope etter 2 runder, og vi avslutter funksjon minFunksjon og returnerer b sin verdi.
Men siden a er aldri definert, så b+=a blir error. Programmet stopper seg på første runde i for-løkka da det kommer til linje- b+=a. Så å kalle hovedprogram får man slike resultater:
0 (det er print(b) før error- a = minFunksjon())
2 (det er print(c) i første runde i for-løkka før error- b +=a i minFunksjon)
Error"
''')
| UTF-8 | Python | false | false | 1,479 | py | 44 | skop.py | 38 | 0.753603 | 0.737817 | 0 | 19 | 74.578947 | 307 |
igor-93/mlcomp | 1,425,929,190,181 | 4ed11eaacddc3e9449138227df9e6e70e0047002 | 3a83fc882a2518397c0bb0471de6dca7f6906797 | /classify_data.py | e2881a86c661f1d055ea232a19e9199ac8e736ea | []
| no_license | https://github.com/igor-93/mlcomp | e83869ba81bacc3f8edd99e17089f485709513e9 | f5abd9b4094e023dbd6c5bf96214d2de951d36d7 | refs/heads/master | 2021-10-25T21:05:49.980036 | 2019-04-07T07:58:37 | 2019-04-07T07:58:37 | 86,148,004 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from preprocess_data import preprocess_image
from load_data import *
# Shared random-forest classifier used by all functions in this module.
n_estimators = 1000
classifier = RandomForestClassifier(n_estimators=n_estimators, class_weight="balanced",n_jobs = -1)
# Tuning knobs. NOTE(review): not referenced in this file — confirm callers.
min_certainty = 0.5
max_level = 3
min_level = 0
def evaluate_performance(X, y):
    """Print mean and std of the 5-fold cross-validated negative log-loss."""
    cv_scores = cross_val_score(classifier, X, y, cv=5, scoring='neg_log_loss')
    print(cv_scores.mean(), cv_scores.std())
def evaluate_performance_FPFN(X, y):
    """Print the confusion counts (FP/FN/TP/TN) of the trained classifier on (X, y).

    Treats label 0 as the negative class and any other label as positive.
    """
    predictions = classifier.predict(X)
    TP = TN = FN = FP = 0
    for predicted, actual in zip(predictions, y):
        if predicted == actual:
            if predicted == 0:
                TN += 1
            else:
                TP += 1
        elif predicted == 0:
            FN += 1
        else:
            FP += 1
    print("FP: {}; FN: {}; TP: {}; TN: {}".format(FP, FN, TP, TN))
def train(X, y):
    # Fit the shared module-level random-forest classifier.
    classifier.fit(X, y)
def predict(X):
    # Probability of the positive class (column 1) for each sample.
    return classifier.predict_proba(X)[:, 1]
| UTF-8 | Python | false | false | 920 | py | 14 | classify_data.py | 11 | 0.65 | 0.627174 | 0 | 47 | 18.574468 | 99 |
habraino/meus-scripts | 13,469,017,460,238 | aee8aa7701ad7c0927a1a87146fd476640333401 | 94ce54fb099265c40939fb63b9935410742d446f | /_prog/_python/_treinamento/funcaoTeste.py | e30a2435a6d7707b6250bc5b6e4e59a4b1097b10 | []
| no_license | https://github.com/habraino/meus-scripts | f98d0780563d3228fa1451978adacf69a27e43e7 | 158fc463cbf3d2d99f0d5f584b11c777133a4279 | refs/heads/master | 2023-04-09T09:27:05.643106 | 2021-04-14T12:13:34 | 2021-04-14T12:13:34 | 357,892,446 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Decorator(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
print('Before the function call.')
res = self.func(*args, **kwargs)
print('After the function call.')
return res
import types
# Decorating with a class replaces testfunc with a Decorator instance.
@Decorator
def testfunc():
    print('Inside the function.')
# False: the decorated name is no longer a plain function object.
a = isinstance(testfunc, types.FunctionType)
print(a)
print(type(testfunc))
#------------------------------------------
def decoratorfatory(mesage):
    """Return a decorator that prints *mesage* before every call it wraps.

    (Function and parameter spelling kept as-is for existing callers.)
    """
    def _decorate(func):
        def _announce_and_call(*args, **kwargs):
            print('The decorator want to tell you: {}'.format(mesage))
            return func(*args, **kwargs)
        return _announce_and_call
    return _decorate
# Smoke test: the decorator should print its message before the call.
@decoratorfatory("Hello Brayen")
def test():
    pass
test()
| UTF-8 | Python | false | false | 788 | py | 406 | funcaoTeste.py | 395 | 0.583756 | 0.583756 | 0 | 36 | 20.888889 | 70 |
pkkwilliam/Pattern-Recognition | 16,011,638,107,622 | 9a7563f980f7ebe661ec066ef2c56a2bf4c601ec | 4b0de84d99abcc3be85d27740ca98886574df89e | /ML/play.py | 587c978833ceca616b1cc3af695665ee276793c6 | []
| no_license | https://github.com/pkkwilliam/Pattern-Recognition | 8b9f035d4f2a1efac7aef4bd0d5e618cdc765653 | b671ab9d3bfdf82155d5f1e7aa831df4f775a6f8 | refs/heads/master | 2021-08-31T00:06:56.003974 | 2017-12-19T22:47:03 | 2017-12-19T22:47:03 | 112,343,616 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as numpy
#print(numpy.random.randn(5))
def testFunction(array):
value = 0;
for number in array:
value += number
print(value)
return [value, "haha"]
# Quick demo: sums [1, 2, 3] (prints 6) then prints the "haha" tag.
a = [1,2,3]
result = testFunction(a)
print("The result is ",result[1])
| UTF-8 | Python | false | false | 243 | py | 15 | play.py | 7 | 0.674897 | 0.650206 | 0 | 15 | 15.133333 | 33 |
xuarehere/yolo_series_deepsort_pytorch | 15,908,558,877,687 | e60c0cbe41c2b81024d3e913744058345c95d537 | 33513f18f4ee7db581a9ccca0d15e04d3dca0e2d | /detector/PPYOLOE/ppyoloe/models/__init__.py | abe33a264d5c38cdfdb43cf0a86db763ef1fc444 | [
"Apache-2.0",
"MIT"
]
| permissive | https://github.com/xuarehere/yolo_series_deepsort_pytorch | 0dd73774497dadcea499b511543f3b75e29d53e1 | 691e6eb94260874de29a65702269806fc447cf8c | refs/heads/master | 2023-05-23T18:32:00.991714 | 2023-02-01T10:58:18 | 2023-02-01T10:58:18 | 513,482,826 | 36 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .backbone import CSPResNet
from .ppyoloe import PPYOLOE
from .ppyoloe_head import PPYOLOEHead
from .neck import CustomCSPPAN
| UTF-8 | Python | false | false | 130 | py | 89 | __init__.py | 51 | 0.838462 | 0.838462 | 0 | 4 | 31.5 | 37 |
ziberleon/casos | 10,213,432,246,534 | d266e570bbd5aa533441f4f804dbc3bcce3cf172 | ece2e4864759c235b4083dc031834b6350612142 | /apps/casos/migrations/0011_auto_20160205_1118.py | d46172f932bc6192197c13233b79447c885ce10a | []
| no_license | https://github.com/ziberleon/casos | 758521df2c4cb0a79175f9e81f80fee2bce4774e | fb3ae9070a35c463867ef0964961a2da8a52d873 | refs/heads/master | 2017-04-27T15:27:17.039152 | 2017-03-22T19:22:00 | 2017-03-22T19:22:00 | 47,713,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('casos', '0010_auto_20160203_1512'),
]
operations = [
migrations.AlterField(
model_name='caso',
name='foliosReemo',
field=models.CharField(max_length=250, verbose_name=b'Folios de REEMO', blank=True),
),
]
| UTF-8 | Python | false | false | 449 | py | 57 | 0011_auto_20160205_1118.py | 35 | 0.599109 | 0.554566 | 0 | 19 | 22.631579 | 96 |
r3m0t/python-modernize | 721,554,536,632 | 739781b8a50d64829db36ca10fc7fb99470c6660 | 74b3dec1956c58f0d0086772b03159a69adf23cf | /libmodernize/fixes/fix_print.py | 3c3144066bc3ad3afa777283936729cfdaa0ee57 | [
"Python-2.0"
]
| permissive | https://github.com/r3m0t/python-modernize | 3972eadb2fe4bc4d965981a23a74b788ab1e56ca | 3614adae1164b65b5c3b08c5f002d5836589028a | refs/heads/master | 2021-01-21T00:38:33.977688 | 2013-07-11T13:04:14 | 2013-07-11T13:04:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
from __future__ import unicode_literals
from lib2to3 import patcomp, pytree, fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Name, Call, Comma, FromImport, Newline, String
from libmodernize import check_future_import
parend_expr = patcomp.compile_pattern(
"""atom< '(' [arith_expr|atom|power|term|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def start_tree(self, tree, filename):
self.found_print = False
def transform(self, node, results):
assert results
bare_print = results.get('bare')
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name('print'), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name('print')
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = ' '
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, '>>'):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = ''
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, 'sep', String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, 'end', String(repr(end)))
if file is not None:
self.add_kwarg(l_args, 'file', file)
n_stmt = Call(Name('print'), l_args)
n_stmt.prefix = node.prefix
self.found_print = True
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = ''
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, '='),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = ' '
l_nodes.append(n_argument)
def finish_tree(self, tree, filename):
if not self.found_print:
return
for node in tree.children:
if 'print_function' in check_future_import(node):
# already imported
return
add_future_import('print_function', tree)
def add_future_import(name, tree):
"""Add future import.
From: https://github.com/facebook/tornado
Copyright 2009 Facebook
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
if not isinstance(tree, pytree.Node):
# Empty files (usually __init__.py) show up as a single Leaf
# instead of a Node, so leave them alone
return
first_stmt = tree.children[0]
if is_docstring(first_stmt):
# Skip a line and add the import after the docstring
tree.insert_child(1, Newline())
pos = 2
elif first_stmt.prefix:
# No docstring, but an initial comment (perhaps a #! line).
# Transfer the initial comment to a new blank line.
newline = Newline()
newline.prefix = first_stmt.prefix
first_stmt.prefix = ''
tree.insert_child(0, newline)
pos = 1
else:
# No comments or docstring, just insert at the start
pos = 0
tree.insert_child(
pos,
FromImport('__future__', [Name(name, prefix=' ')]))
tree.insert_child(pos + 1, Newline()) # terminates the import stmt
# copied from fix_tuple_params.py
def is_docstring(stmt):
return isinstance(stmt, pytree.Node) and \
stmt.children[0].type == token.STRING
| UTF-8 | Python | false | false | 5,119 | py | 6 | fix_print.py | 4 | 0.579801 | 0.572377 | 0 | 155 | 32.025806 | 79 |
nitheeshmavila/python-solutions | 18,562,848,687,674 | c45170b72574c74c4abfbd097fc304ac55447c8d | 3e0230a320a15fd8009b0aad04d37e1d50a28362 | /searching/binary_search.py | 0ffa0ed753222c82473f6d2be1551eb987ef9cce | []
| no_license | https://github.com/nitheeshmavila/python-solutions | 1f22a17f6a289c0f75f65ff9dc0ac4eec891cbb1 | 756f2830941a13f8968922967d9638c46857bf7c | refs/heads/master | 2016-09-22T17:23:11.266137 | 2016-09-21T17:04:47 | 2016-09-21T17:04:47 | 64,669,011 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
binary search
--------------
Assume the seq is sorted, seq is a list
function bsearch is a recursive version of binary search
'''
def bsearch(seq,v,l,r):
'''seq - sorted list
v - value to be searched
l - lower index
r - higher index'''
print(seq,v,l,r)
if r-l == 0 :
return False
mid=l+r//2
if seq[mid]==v:
return(v)
if seq[mid]>v:
return(bsearch(seq,v,l,mid))
else:
return(bsearch(seq,v,mid,r))
| UTF-8 | Python | false | false | 485 | py | 72 | binary_search.py | 37 | 0.54433 | 0.540206 | 0 | 23 | 20.086957 | 56 |
HugoSilva177/StockData | 1,683,627,200,398 | d336b2f72e91eec1fc5fb29a4c9b861e6f559bff | 65dd5eb1f6742e05179e85848b30529aab7b91a7 | /extract_transform_load/fundamentus_etl/etl_mongodb/src/dao/IndicadoresDAO.py | e3fe61f1f726f87ee516120ae61c6af24044c9bf | []
| no_license | https://github.com/HugoSilva177/StockData | 9c8179dde841d2d9d6607d9899a3ab0bbed79064 | 78dea51aa550ba9e4d593fa24702c73c9b835571 | refs/heads/master | 2023-08-06T23:57:20.113184 | 2021-09-11T17:38:12 | 2021-09-11T17:38:12 | 336,576,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from extract_transform_load.fundamentus_etl.etl_mongodb.src.connect_db.DAConexaoMongo import DAConexaoMongo
from extract_transform_load.fundamentus_etl.etl_mongodb.src.dao.AbstractMongoDAO import AbstractMongoDAO
class IndicadoresDAO(AbstractMongoDAO):
def __init__(self, nome_banco="fundamentus", nome_colecao="indicadores_empresa"):
super().__init__()
self.__erro = None
self.__colecao_mongo = None
try:
self.__colecao_mongo = DAConexaoMongo(nome_banco, nome_colecao).get_colecao_mongo( )
except Exception:
self.__erro = "Falha em estabelecer conexao com a coleção 'indicadores_empresa' no MongoDB"
def inserir_dados(self, indicadores_fundamentalistas_empresa):
id_inserido_indicadores = self.__colecao_mongo.insert_one(indicadores_fundamentalistas_empresa).inserted_id
return id_inserido_indicadores
def get_erro(self):
return self.__erro | UTF-8 | Python | false | false | 1,000 | py | 104 | IndicadoresDAO.py | 79 | 0.675351 | 0.675351 | 0 | 21 | 46.571429 | 119 |
NextThought/nti.externalization | 15,281,493,639,302 | 46b488b38f04a23836227ea730cc2f5f82ac3546 | 33170dae0911dd3c8ba06eda0f930aa2aa8dfcdf | /src/nti/externalization/numbers.py | e58126f3ccb3b319cd24f781d43b1f02c7635cc6 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | https://github.com/NextThought/nti.externalization | 8223bbde921f5845595d1de75b02931c1b082393 | 5a445b85fb809a7c27bf8dbe45c29032ece187d8 | refs/heads/master | 2021-12-27T12:21:19.494174 | 2021-08-02T18:29:46 | 2021-08-02T18:29:46 | 96,366,103 | 1 | 0 | NOASSERTION | false | 2021-08-02T18:23:23 | 2017-07-05T22:26:39 | 2021-07-23T19:38:12 | 2021-08-02T18:23:22 | 1,212 | 0 | 0 | 6 | Python | false | false | # -*- coding: utf-8 -*-
"""
Support for externalizing arbitrary numbers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
import decimal
from zope import interface
from zope import component
from zope.interface.common.numbers import INumber
from zope.interface.common.numbers import IRational
from nti.externalization.interfaces import IInternalObjectExternalizer
@interface.implementer(IInternalObjectExternalizer)
@component.adapter(INumber)
class second_chance_number_externalizer(object):
def __init__(self, context):
self.context = context
def toExternalObject(self, **unused_kwargs):
return str(self.context)
# Depending on the order of imports, these may or may not have
# been declared already.
# pylint:disable=no-value-for-parameter
if not IRational.providedBy(fractions.Fraction('1/3')): # pragma: no cover
interface.classImplements(fractions.Fraction, IRational)
if not INumber.providedBy(decimal.Decimal('1')): # pragma: no cover
# NOT an IReal; see notes in stdlib numbers.py for why.
interface.classImplements(decimal.Decimal, INumber)
assert IRational.providedBy(fractions.Fraction('1/3'))
assert INumber.providedBy(decimal.Decimal('1'))
| UTF-8 | Python | false | false | 1,281 | py | 131 | numbers.py | 71 | 0.76815 | 0.762685 | 0 | 41 | 30.243902 | 74 |
Zikko1/admindashboard | 3,341,484,578,181 | f673321c489958962b4b3d070b0bd1b94eb1407e | 676f429470f85b6374154c7518b41c9cd2d9f200 | /dashboard/migrations/0004_loansrequest_is_approved.py | 6bdf0ff30c55717d586d751023463e8fe122073d | []
| no_license | https://github.com/Zikko1/admindashboard | a3a701aaecb9276eb122d417040d8f054508b056 | 2c69f31d844aef4ad4b40d1552b714bffeda6ce5 | refs/heads/master | 2023-07-15T15:18:44.486459 | 2021-08-29T19:19:47 | 2021-08-29T19:19:47 | 393,809,256 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.5 on 2021-08-01 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0003_auto_20210730_2228'),
]
operations = [
migrations.AddField(
model_name='loansrequest',
name='is_approved',
field=models.BooleanField(default=False),
),
]
| UTF-8 | Python | false | false | 400 | py | 20 | 0004_loansrequest_is_approved.py | 9 | 0.6 | 0.5225 | 0 | 18 | 21.222222 | 53 |
jhj9109/TIL | 2,989,297,276,125 | 471d2923ca39c2760c6ae9885331de0f626db4c5 | eb6b21e677b34536af6217b9db279150e022ae5a | /swea/LV3/1244. 최대 상금/1244.py | 27db3c004cef2c3206f095858da9495cd4e42480 | []
| no_license | https://github.com/jhj9109/TIL | 78e6c5319e04fc1aebb7cf54d0730cfa54098c0f | 84d75b929a506c90d47697a59193fc8ef4576141 | refs/heads/master | 2023-02-01T23:46:24.059487 | 2020-12-22T01:47:24 | 2020-12-22T01:47:24 | 234,995,689 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin = open('input.txt')
def go(n, k, c):
global maxV
global odd
if(c==0 or n==k):
s = int(''.join(nums))
if maxV < s:
maxV = s
odd = c%2
elif maxV == s:
odd = min(odd, c%2)
else:
for i in range(k):
nums[n], nums[i] = nums[i], nums[n]
cnt = 1 if n != i else 0
go(n+1, k, c-cnt)
nums[n], nums[i] = nums[i], nums[n]
T = int(input())
for tc in range(1, T+1):
nums, change = input().split()
nums = list(nums)
maxV = int(''.join(nums))
odd = int(change)%2
go(0, len(nums), int(change))
if odd:
n1 = maxV%10
n2 = maxV%100//10
maxV = maxV - (n1 + n2*10) + (n1*10 + n2)
print(f"#{tc} {maxV}") | UTF-8 | Python | false | false | 786 | py | 534 | 1244.py | 374 | 0.445293 | 0.410941 | 0 | 32 | 23.59375 | 49 |
mxl1994/Program_Python | 2,980,707,346,737 | dbc3d873b20defdc33eb65c9b683fa802a209644 | 133d9a83c24fcf9bcbe77337f34b18d32b02eff9 | /DjangoProgram/app01/app01/views.py | 80ef3c494c3a0fcdc61f409a192b2621e31119b3 | []
| no_license | https://github.com/mxl1994/Program_Python | bb0c8b8f27835f34046f84b86600b5fc4b985d6d | 1235830fa95db491aa0d0a0851cfa9267c5ac356 | refs/heads/master | 2021-07-12T15:05:36.056703 | 2020-08-06T09:36:16 | 2020-08-06T09:36:16 | 186,586,233 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
from django.shortcuts import render,HttpResponse
from app01.My_Forms import EmpForm
from app01 import models
from django.core.exceptions import ValidationError
def add_emp(request):
if request.method == "GET":
form = EmpForm()
return render(request,"add_emp.html",{"form":form})
else:
form = EmpForm(request.POST)
if form.is_valid():
data = form.cleaned_data
data.pop("r_salary")
models.Emp.objects.create(**data)
# return HttpResponse("ok")
# return render(request, "add_emp.html", {"form": form})
return redirect("/index/")
else:
clean_errors = form.errors.get("__all__") #获取全局钩子错误信息
return render(request,"add_emp.html", {"form": form, "clean_errors": clean_errors})
from django.contrib.auth.models import User
User.objects.create(username='runboo',password='123') | UTF-8 | Python | false | false | 956 | py | 62 | views.py | 57 | 0.622863 | 0.614316 | 0 | 26 | 35.038462 | 91 |
thomelane/mxnet-starterpack | 7,181,185,326,282 | 7ac801aa64d50aa506a0b33d016a0032225395e5 | d356fa1258115d5799bc254a4d7a114bbd37a0d4 | /myproject/metrics.py | 63161ac9e6295cad10ca11a7c0aa0d3c55022554 | []
| no_license | https://github.com/thomelane/mxnet-starterpack | f912b618a0b6a31c1399c5adda7ff532edb8bd09 | 9aef27c451a7f0f80c785eaa71943afc9dac0bbe | refs/heads/master | 2020-07-21T11:14:48.747711 | 2019-09-13T18:53:21 | 2019-09-13T18:53:21 | 206,845,587 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mxnet as mx
metric = mx.metric.Accuracy() | UTF-8 | Python | false | false | 50 | py | 9 | metrics.py | 7 | 0.74 | 0.74 | 0 | 4 | 11.75 | 29 |
cash2one/xai | 12,987,981,129,372 | ae8e93e0bb3b3bc61efc3c7d9c78c2e643b3fe44 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_release.py | 3fb0d95ed65dfbc63c7c0dd0804cc2e716ee4aeb | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#calss header
class _RELEASE():
def __init__(self,):
self.name = "RELEASE"
self.definitions = [u'an occasion when someone is allowed to leave prison, etc.: ', u'the act of flowing out from somewhere: ', u'a feeling that you are free from something unpleasant: ', u'the act of making something public or available for use: ', u'a written statement that gives information to be broadcast or published: ', u'a musical recording that is made available for the public to buy: ', u'If a film is on general release, it is available to be shown in cinemas: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| UTF-8 | Python | false | false | 734 | py | 37,275 | _release.py | 37,266 | 0.692098 | 0.689373 | 0 | 17 | 42.058824 | 478 |
24rahul/rahul_rajput | 8,718,783,648,135 | 3967ff071ef9cdaa6e417040fca195be623ee892 | 32c3bb569a9be60b1ebf68c72aec610e1844f604 | /codechat/wsgi.py | 6418e3d9f80ea2a95811a0972ec57870f91af461 | []
| no_license | https://github.com/24rahul/rahul_rajput | 7b7917eb847423774c0f0ced233c13b298a87f4f | a241b3738e9c3965ea6b5910a84a823006813ec6 | refs/heads/master | 2020-01-27T19:33:33.655651 | 2020-01-12T09:54:47 | 2020-01-12T09:54:47 | 76,533,407 | 1 | 0 | null | false | 2019-12-24T07:51:12 | 2016-12-15T07:09:36 | 2019-12-24T07:51:04 | 2019-12-24T07:51:11 | 24,596 | 0 | 0 | 0 | Python | false | false | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.modules")
# import monkeypatches
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| UTF-8 | Python | false | false | 190 | py | 385 | wsgi.py | 319 | 0.8 | 0.8 | 0 | 7 | 26.142857 | 67 |
anilmr/wireless | 4,355,096,874,087 | 2289d142671c3f4358f695772426038537b33e95 | edeb4419ea52cd8d2ee07cad4a463b12560cb232 | /tcp_bw.py | e92862e670526d6ad5e67d147518d3c63fa8a525 | []
| no_license | https://github.com/anilmr/wireless | 3423be218ce5dae77fbb0f608e9aac90074d8e8a | 2c370d968537f1139c25fa52707cc0917360a844 | refs/heads/master | 2016-05-29T03:15:32.559160 | 2015-05-02T01:03:31 | 2015-05-02T01:03:31 | 34,931,062 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Program to find throughput in packets/s using equations provided in
# Modeling TCP Throughput: A Simple Model and its Empirical Validation
# authored by Jim Kurose and Jithendra Padhye.
# Authored - Anil Mallapur
import math
mss = 1460
rtt_old = 0.032829
wmax_old = 5
b = 3
y = [x * 0.1 for x in range(1, 10)]
y.append(0.03)
def A(p,rtt):
return rtt * (math.sqrt((2 * b * p)/3))
def B(p):
return min(1,3*math.sqrt((3 * b * p)/8))
def C(p):
return p * (1 + (32 * p * p))
def tcp_new_bw(mss,rtt,loss_rate,wmax, initial_rto,
want_bytes_per_second=False):
""" Function to calculate throughput based on equations in paper
Modeling TCP Throughput: A Simple Model and its Empirical Validation
authored by Jim Kurose and Jithendra Padhye
Input :-
mss = Message Segment Size
rtt = Round Trip Time average in seconds
loss_rate = Probability of loss percentage
wmax = Window Max in the beginning of connection
initial_rto = Retransmission time out in seconds
"""
p = loss_rate
return 59.385 * (1 / A(p,rtt) + initial_rto * B(p) * C(p))
for i in y:
print tcp_new_bw(mss,rtt,i,wmax,initial_rto=0.0033,want_bytes_per_second=False)
print("Calculated value")
print tcp_new_bw(mss,rtt,0.033,wmax,0.0033,True)
| UTF-8 | Python | false | false | 1,264 | py | 4 | tcp_bw.py | 3 | 0.68038 | 0.640823 | 0 | 45 | 27.088889 | 108 |
cmosguy/kaggle-hash-code-traffic-signal | 11,888,469,504,079 | 00a4558a70ddf25ecb54866d032dbe407b28eb17 | f880dd0c7f30a2e7570ca9436daee15b30e4a03d | /traffic/harmony_search.py | 38c31aa8b619a5249b2aaa82c38e69a18710a6f5 | []
| no_license | https://github.com/cmosguy/kaggle-hash-code-traffic-signal | d9177728e1270d4a646f644d9b83d3a63fe8e218 | 73ed750cdac27df1844b02149384c039f2e954d7 | refs/heads/master | 2023-04-30T12:32:02.073980 | 2021-05-24T18:01:06 | 2021-05-24T18:01:06 | 366,092,435 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyharmonysearch import ObjectiveFunctionInterface
import random
from bisect import bisect_left
from traffic.traffic import Traffic
from copy import deepcopy
import pandas as pd
class HarmonySearch(ObjectiveFunctionInterface):
"""
This is a toy objective function that contains a mixture of continuous and discrete variables.
Goal:
maximize -(x^2 + (y+1)^2) + 4
The maximum is 4 at (0, -1).
In this implementation, x is a discrete variable with choices ranging from -100 to 100 in increments of 1.
y is still a continuous variable.
Warning: Stochastically solving a linear system is dumb. This is just a toy example.
"""
def __init__(self, min_light_time = 1, max_light_time=3, in_file='./example.in'):
self.traffic = Traffic(in_file=in_file)
self.scheduler = []
num_streets = len(self.traffic.streets)
self._lower_bounds = num_streets * [None]
self._upper_bounds = num_streets * [None]
self._variable = num_streets * [True]
self._discrete_values = num_streets * [[x for x in range(min_light_time, max_light_time+1)]]
# define all input parameters
self._maximize = True # do we maximize or minimize?
self._max_imp = 50000 # maximum number of improvisations
self._hms = 100 # harmony memory size
self._hmcr = 0.75 # harmony memory considering rate
self._par = 0.5 # pitch adjusting rate
self._mpap = 0.25 # maximum pitch adjustment proportion (new parameter defined in pitch_adjustment()) - used for continuous variables only
self._mpai = 10 # maximum pitch adjustment index (also defined in pitch_adjustment()) - used for discrete variables only
def generate_sceduler(self, vector):
self.scheduler = pd.DataFrame({'street_name': deepcopy(self.traffic.street_detail['name']), 'green_time': vector})
return self.scheduler
def get_fitness(self, vector):
self.generate_sceduler(vector)
self.traffic.generate_intersection(scheduler=self.scheduler)
cars = self.traffic.simulate()
scheduler_score = self.traffic.calculate_simulation_score(cars)
return scheduler_score
def get_value(self, i, j=None):
if self.is_discrete(i):
if j:
return self._discrete_values[i][j]
return self._discrete_values[i][random.randint(0, len(self._discrete_values[i]) - 1)]
return random.uniform(self._lower_bounds[i], self._upper_bounds[i])
def get_lower_bound(self, i):
"""
This won't be called except for continuous variables, so we don't need to worry about returning None.
"""
return self._lower_bounds[i]
def get_upper_bound(self, i):
"""
This won't be called except for continuous variables.
"""
return self._upper_bounds[i]
def get_num_discrete_values(self, i):
if self.is_discrete(i):
return len(self._discrete_values[i])
return float('+inf')
def get_index(self, i, v):
"""
Because self.discrete_values is in sorted order, we can use binary search.
"""
return HarmonySearch.binary_search(self._discrete_values[i], v)
@staticmethod
def binary_search(a, x):
"""
Code courtesy Python bisect module: http://docs.python.org/2/library/bisect.html#searching-sorted-lists
"""
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
def is_variable(self, i):
return self._variable[i]
def is_discrete(self, i):
return self._discrete_values[i] is not None
def get_num_parameters(self):
return len(self._lower_bounds)
def use_random_seed(self):
return hasattr(self, '_random_seed') and self._random_seed
def get_max_imp(self):
return self._max_imp
def get_hmcr(self):
return self._hmcr
def get_par(self):
return self._par
def get_hms(self):
return self._hms
def get_mpai(self):
return self._mpai
def get_mpap(self):
return self._mpap
def maximize(self):
return self._maximize
| UTF-8 | Python | false | false | 4,289 | py | 15 | harmony_search.py | 13 | 0.622756 | 0.613896 | 0 | 134 | 31.007463 | 147 |
jmseb3/bakjoon | 1,984,274,930,891 | 9c1a37c4a58a24754015a8da61eca92aefb11510 | 30b97efb2f36f81aa684d16d19e0e2db17f2967d | /20. 분할정복/11401.py | ef027bb81875571661a505b1727c5a964c260429 | []
| no_license | https://github.com/jmseb3/bakjoon | 0a784a74c6476ef51864e2ada9d2551c7c7979eb | a38db54e851372059b0e45add92e43e556835e62 | refs/heads/main | 2023-08-25T08:43:04.579785 | 2021-10-01T08:40:37 | 2021-10-01T08:40:37 | 362,287,450 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
input = sys.stdin.readline
n, k = map(int, input().rstrip().split())
p = 1000000007
# 페르마소정리 이용
# p가 소수이고 a,p가 서로수 일때 a**(p-1)%p = 1
# 변형을 시켜서 a**(p-2)%p = p**(-1) 으로 사용가능하다.
# 여기서 주어진수 p=1000000007 은 소수이다.
# a^m에 대하여
# 짝수면 m//2
# 홀수면 m//2 한뒤 a를 곱해줌
def pow(n, k):
global p
if k == 1:
return n
next_pow = pow(n, k//2)
if k % 2 == 0:
return (next_pow**2) % p
else:
return ((next_pow ** 2) * n) % p
factori = [1] * (n+1)
for i in range(2, n+1):
factori[i] = (factori[i-1]*i) % p
print(factori[n]*(pow(factori[n-k], p-2))*(pow(factori[k], p-2)) % p)
| UTF-8 | Python | false | false | 743 | py | 288 | 11401.py | 286 | 0.516908 | 0.454106 | 0 | 38 | 15.342105 | 69 |
langrind/UQtie | 3,959,959,897,557 | da6e0580ba48d8bc652ae9c3464298d4f63e8a46 | ef9811b8749090154b83e0d18f02c7bf91f00679 | /uqtie/UqtStylesheet.py | 0e1c11840edfb52bafea761c6a2698a9114b7c8c | [
"MIT"
]
| permissive | https://github.com/langrind/UQtie | 9941faae8747c29824bf552eaac3e5cdfbeea078 | 032693695117083cdd715beb2a679e4ccee060d8 | refs/heads/master | 2020-09-03T05:29:22.652776 | 2020-02-25T03:57:34 | 2020-02-25T03:57:34 | 219,397,281 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Class that manages a UQtie application's stylesheet
There are advantages and disadvantages to Qt stylesheets, Qt settings, and
Qt Style. They aren't mutually exclusive, and they don't all play together
either.
This module attempts to make it possible to use a stylesheet while still
using the QFontDialog to select fonts, and zoom-in/zoom-out shortcuts such
as CTRL-+.
The idea is that we have variables (which come from somewhere, right now a
pickled dictionary, but later could be elsewhere, or multiple elsewheres).
These variables can be used in the stylesheet, such that the dynamic changes in
appearance can be made at runtime without requiring stylesheet changes, and the
changes can be persisted also without changing the stylesheet.
Font, font size and widget sizes (e.g QScrollBar:vertical { width } ) seem like
good candidates to be determined dynamically instead of via hardcoded values
in a stylesheet. That way, when you have high-DPI monitor and less-than-perfect
vision, you can adjust easily.
Part of my motivation for doing this is because PyQt running on Windows, Linux,
MacOS and Cygwin doesn't behave identically, not even close to identical in
some ways. If they would all have identical QScreens given identical monitors
and graphics cards, then you could just make reasonable choices for your stylesheet,
and rely on the OS GUI settings.
Variables you can use in a QSS file are:
$main_font_family
$main_font_weight (not supported yet)
$main_font_size
$scroll_bar_width
Futures:
1) Add more variables
2) Make an inspector widget for stylesheet variables
"""
from __future__ import print_function
from pathlib import Path
import os,re,pickle
from PyQt5.QtCore import QSettings
class StylesheetManager(object):
"""
Class that manages a UQtie application's stylesheet
"""
# If no stylesheet has been provided, use this one. Should this
# really have scroll bar dimensions?
defaultStylesheet = """
QWidget {
font-family: $main_font_family;
font-weight: $main_font_weight;
font-size: $main_font_size;
}
QScrollBar:vertical {
width: $scroll_bar_width;
}
QScrollBar:horizontal {
height: $scroll_bar_width;
}
"""
def __init__(self, app, settings, appName ):
self.stylesheetFileName = None
self.stylesheetVarsFileName = None
self.app = app
self.appName = appName
self.appSettings = settings
self.varsDict = {}
self.defaultVarsDict = {
'main_font_family': 'Arial',
'main_font_weight': 'Regular',
'main_font_size' : '16pt',
'scroll_bar_width': '15px',
}
self.determine_stylesheet_filenames(appName)
def determine_stylesheet_path(self):
"""
Fill in self.appDirPath appropriately
"""
self.appDirPath = None
if os.name == 'nt':
# running on Windows
appDirPath = Path(os.path.expanduser('~')) / 'Application Files'
if not appDirPath.is_dir():
print ( '{p} is not a directory'.format(p=appDirPath))
return
appDirPath /= self.appName
if not appDirPath.is_dir():
try:
appDirPath.mkdir()
except:
print ( 'Could not create directory {p}'.format(p=appdirPath))
return
self.appDirPath = appDirPath
else:
# On other OS, we use Settings to determine where stylesheet lives
if self.appSettings:
self.appDirPath = Path(os.path.dirname(self.appSettings.fileName()))
def determine_stylesheet_filenames(self, appName):
"""
Fill in stylesheet filenames appropriately
"""
self.determine_stylesheet_path()
if self.appDirPath:
#print ("self.appDirPath: {}".format(self.appDirPath))
baseName = str(self.appDirPath / appName)
self.stylesheetFileName = baseName + '.qss'
self.stylesheetVarsFileName = baseName + 'Vars.pickle'
def apply(self):
"""
Apply the application window stylesheet, including variable value substitution
"""
# This means:
# 1) Read it from a file
# 2) Replace all '{' and '}' which are QSS syntax (e.g. 'QWidget {') with
# '{{' and '}}'. This protects the QSS syntax during the next steps.
# 3) Replace all $word with {word} thus turning the string into a Python
# string with argument specifiers (e.g. '{main_font_family}').
# 4) Replace all the format-string argument specifiers with variables
# 5) Apply the resulting stylesheet to the App
#print ( "apply: {}".format(self.stylesheetFileName))
stylesheetText = None
try:
with open(self.stylesheetFileName, 'r') as content_file:
stylesheetText = content_file.read()
except:
pass
if not stylesheetText:
print(f'No file {self.stylesheetFileName}')
stylesheetText = self.defaultStylesheet
try:
with open(self.stylesheetFileName, 'w') as content_file:
content_file.write(stylesheetText)
except:
print(f'Could not write default stylesheet file {self.stylesheetFileName}')
# These next two could be done in one pass using the cool multiple_replace() from
# https://stackoverflow.com/questions/15175142/how-can-i-do-multiple-substitutions-using-regex-in-python
# But this is easier to read
stylesheetText = stylesheetText.replace('{', '{{')
stylesheetText = stylesheetText.replace('}', '}}')
# Turn all $word into {word}
stylesheetText = re.sub(r'\$(([a-z]|[A-Z])\w*)', r'{\1}', stylesheetText)
# for k, v in self.varsDict.items():
# print ( f'{k}: {v}' )
# substitute everything from our variables dict
result = stylesheetText.format_map(self.varsDict)
# apply
self.app.setStyleSheet(result)
def save_stylesheet_vars(self ):
"""Write our variables dict out to a file"""
with open(self.stylesheetVarsFileName, 'wb') as h:
pickle.dump(self.varsDict, h)
def set_missing_stylesheet_vars(self ):
"""Set all the missing variables in the variables dict to default values"""
for k in self.defaultVarsDict:
self.varsDict.setdefault(k, self.defaultVarsDict[k])
def read_stylesheet_vars(self ):
"""Read all the variables from saved file into our dict"""
try:
with open(self.stylesheetVarsFileName, 'rb') as h:
self.varsDict = pickle.loads(h.read())
except FileNotFoundError as e:
print(e)
# Maybe some values are missing, fix it up
self.set_missing_stylesheet_vars()
def zoom_in(self):
"""Increase the value of variables that influence the size of the UI"""
# Trim off 'pt' at the end of the string. Maybe a little fragile...
fontSize = int(self.varsDict['main_font_size'][0:-2])
fontSize += 1
self.varsDict['main_font_size'] = f'{fontSize}pt'
# Trim off 'px' at the end of the string. Also a little fragile...
scrollBarWidth = int(self.varsDict['scroll_bar_width'][0:-2])
scrollBarWidth += 1
self.varsDict['scroll_bar_width'] = f'{scrollBarWidth}px'
def zoom_out(self):
"""Decrease the value of variables that influence the size of the UI"""
# Trim off 'pt' at the end of the string. Maybe a little fragile...
fontSize = int(self.varsDict['main_font_size'][0:-2])
if fontSize > 0:
fontSize -= 1
self.varsDict['main_font_size'] = f'{fontSize}pt'
# Trim off 'px' at the end of the string. Also a little fragile...
scrollBarWidth = int(self.varsDict['scroll_bar_width'][0:-2])
if scrollBarWidth > 0:
scrollBarWidth -= 1
self.varsDict['scroll_bar_width'] = f'{scrollBarWidth}px'
# Variable setters / Properties - don't want to keep adding these for every variable
# and the way it is evolving, it seems like we don't have to
def set_main_font_family(self, family):
self.varsDict['main_font_family'] = family
def set_main_font_weight(self, weight):
self.varsDict['main_font_weight'] = weight
def set_main_font_size(self, size):
self.varsDict['main_font_size'] = size
## Test Code
import argparse, sys
from PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow
class TestAppMainWindow(QMainWindow):
def __init__(self, parsedArgs, **kwargs ):
super(TestAppMainWindow, self).__init__()
self.setup_ui()
self.show()
def setup_ui(self):
vbox = QVBoxLayout(self.centralWidget())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--test-write-vars', action='store_const', const=True,
help='Test writing variables to file')
parser.add_argument('-r', '--test-read-vars', action='store_const', const=True,
help='Test reading variables from file')
parsedArgs,unparsedArgs = parser.parse_known_args()
organizationName='Craton'
appName='StyMgrTest'
# Pass unparsed args to Qt, might have some X Windows args, like --display
qtArgs = sys.argv[:1] + unparsedArgs
app = QApplication(qtArgs)
settings = QSettings(organizationName, appName)
styMgr = StylesheetManager(app, settings, appName)
if parsedArgs.test_write_vars:
print('write')
styMgr.save_stylesheet_vars()
sys.exit(0)
if parsedArgs.test_read_vars:
print('read')
styMgr.read_stylesheet_vars()
for k, v in styMgr.varsDict.items():
print(k, v)
sys.exit(0)
mainw = TestAppMainWindow(parsedArgs, app=app, organizationName=organizationName, appName=appName)
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 10,333 | py | 9 | UqtStylesheet.py | 4 | 0.622762 | 0.618891 | 0 | 282 | 35.641844 | 112 |
sungsooha/ftfy-official | 858,993,469,398 | d0dfb9736f313ea118540eaded279a8c96c04c83 | 7e32826387f3cc9c966a201cec4a5d6bf3233edf | /anal_metric.py | 810989ad87542cd9e8ba3493807301ef7b0867a3 | []
| no_license | https://github.com/sungsooha/ftfy-official | 6b80cae1c9ad7bfc6cba2ce76be74804acbfd429 | d4526fd03437be59bff34f935bdf9253fd17409a | refs/heads/master | 2020-04-17T02:03:17.957335 | 2019-03-12T14:08:39 | 2019-03-12T14:08:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import tensorflow as tf
import numpy as np
import pickle
from utils.Param import get_default_param
from utils.eval import fpr, retrieval_recall_K
from network.model_fn import triplet_model_fn
from network.dataset.sem_patchdata import input_fn
from network.dataset.sem_patchdata_ext import input_fn as sem_input_fn
from network.train import TripletEstimator
from skimage.io import imsave
tf.logging.set_verbosity(tf.logging.INFO)
np.random.seed(2019)
tf.set_random_seed(2019)
param = get_default_param(mode='AUSTIN', log_dir=None)
param.log_dir = './log/campus'
param.data_dir = '/home/sungsooha/Desktop/Data/ftfy/austin'
#sem_data_dir = './Data/sem/train'
sem_data_dir = '/home/sungsooha/Desktop/Data/ftfy/sem/train'
sem_test_datasets = []
for f in os.listdir(sem_data_dir):
if os.path.isdir(os.path.join(sem_data_dir,f)):
sem_test_datasets.append(f)
sem_test_datasets = sorted(sem_test_datasets)
print(sem_test_datasets)
param.train_datasets = 'campus_patch'
param.test_datasets = 'sem' #'human_patch'
param.batch_size = 128
param.model_path = './log/campus/ckpt'
do_test_fpr = True
do_test_retrieval = True
do_collect_retrieval_5 = True
print('Preparing data pipeline ...')
with tf.device('/cpu:0'), tf.name_scope('input'):
# test_dataset, test_data_sampler = input_fn(
# data_dir=param.data_dir,
# base_patch_size=param.base_patch_size,
# patches_per_row=param.patches_per_row,
# patches_per_col=param.patches_per_col,
# batch_size=param.batch_size,
# patch_size=param.patch_size,
# n_channels=param.n_channels
# )
test_dataset, test_data_sampler = sem_input_fn(
data_dir=sem_data_dir,
base_patch_size=param.base_patch_size,
patches_per_row=10,
patches_per_col=10,
batch_size=param.batch_size,
patch_size=param.patch_size,
n_channels=param.n_channels
)
data_iterator = tf.data.Iterator.from_structure(
test_dataset.output_types,
test_dataset.output_shapes
)
test_dataset_init = data_iterator.make_initializer(test_dataset)
batch_data = data_iterator.get_next()
print('load data ...')
test_data_sampler.load_dataset(
dir_name=sem_test_datasets,
ext='bmp',
patch_size=param.patch_size,
n_channels=param.n_channels,
debug=True
)
print('Loading training stats: %s' % param.train_datasets)
file = open(os.path.join(param.log_dir, 'stats_%s.pkl' % param.train_datasets), 'rb')
mean, std = pickle.load(file)
print('Mean: {:.5f}'.format(mean))
print('Std : {:.5f}'.format(std))
test_data_sampler.normalize_data(mean, std)
print('Creating the model ...')
anchors, positives, negatives = batch_data
spec = triplet_model_fn(
anchors, positives, negatives,
n_feats=param.n_features,
mode='TEST', cnn_name=param.cnn_name, shared_batch_layers=True,
name=param.project_name
)
estimator = TripletEstimator(spec, model_path=param.model_path)
if do_test_fpr:
print('Test for FPR95 ...')
test_data_sampler.set_mode(1)
test_match = estimator.run_match(test_dataset_init)
fpr95 = fpr(test_match.labels, test_match.scores, recall_rate=0.95)
print('FPR95: {:.5f}'.format(fpr95))
if do_test_retrieval or do_collect_retrieval_5:
print('Test retrieval ...')
test_data_sampler.set_mode(2)
test_rrr = estimator.run_retrieval(test_dataset_init)
ind = test_rrr.index
labels = test_data_sampler.get_labels(ind)
rrr, rrr_col = retrieval_recall_K(
features=test_rrr.features,
labels=labels,
is_query=test_rrr.scores,
K=[1, 5, 10, 20, 30],
collect_top_5=do_collect_retrieval_5
)
print('Retrieval: {}'.format(rrr))
if do_collect_retrieval_5:
failed_ind, = np.where(rrr_col[:, 6]==0)
count = 0
output_dir = os.path.join(param.log_dir, param.test_datasets)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file = open(os.path.join(output_dir, 'retrieval_fail.txt'), 'w')
for idx in failed_ind:
q_idx = ind[rrr_col[idx, 0]]
top5_ind = ind[rrr_col[idx, 1:6]]
patch_set = []
#patch_info = []
patch, patch_idx = test_data_sampler.get_patch_by_retrieval_idx(q_idx)
patch = np.squeeze(patch)
patch = patch * std + mean
patch = (patch - patch.min()) / patch.ptp()
patch_set.append(patch)
# todo: this doesn't work for sem patch dataset
patch_gid = patch_idx // 78
irow = (patch_idx%78) // 6
icol = (patch_idx%78) % 6
#patch_info.append((patch_gid, irow, icol))
file.write("{:d} {:d} {:d} {:d}".format(
count, patch_gid, irow, icol
))
for i in top5_ind:
patch, patch_idx = test_data_sampler.get_patch_by_retrieval_idx(i)
patch = np.squeeze(patch)
patch = patch * std + mean
patch = (patch - patch.min()) / patch.ptp()
patch_set.append(patch)
# todo: this doesn't work for sem patch dataset
patch_gid = patch_idx // 78
irow = (patch_idx % 78) // 6
icol = (patch_idx % 78) % 6
#patch_info.append((patch_gid, irow, icol))
file.write(" {:d} {:d} {:d} {:d}".format(
i, patch_gid, irow, icol
))
file.write("\n")
patch_set = np.hstack(patch_set)
patch_set = patch_set * 255.
patch_set = patch_set.astype(np.uint8)
imsave(os.path.join(output_dir, 'fail_{:d}.bmp'.format(count)), patch_set)
count += 1
file.close()
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots(1, 6)
# for _ax, p, info in zip(ax, patches, patch_info):
# _ax.imshow(np.squeeze(p), cmap='gray')
# _ax.axis('off')
# _ax.set_title("{}".format(info))
# plt.show()
#q_label = labels[q_idx]
#top5_labels = labels[top5_ind]
# print('Query: {}, top 5: {}'.format(
# q_label, top5_labels
# ))
#retrieval_data = test_data_sampler.data['retrieval']
# out_dir = os.path.join(param.log_dir, 'metrics_{}_{}.npy'.format(
# param.train_datasets, param.test_datasets
# ))
# metric = dict(
# loss=None,
# fpr95=fpr95,
# retrieval=rrr
# )
# np.save(out_dir, metric)
| UTF-8 | Python | false | false | 6,609 | py | 51 | anal_metric.py | 43 | 0.593887 | 0.581177 | 0 | 202 | 31.618812 | 86 |
iyevstifieiev/dump | 5,566,277,638,175 | caac10dee17d2add97d859af76dbc9eef284fa58 | a571ee90244af1d3ca1f44389ca33fee5533f55c | /parse_feed.py | 5d649b31dd0af082db6317077628c36ce5bc130f | []
| no_license | https://github.com/iyevstifieiev/dump | b2df46b92887c7bea1271262a0ef0c823f6718da | 96ccf022bc03eb5631ef98eb9ad581d8bc15ae11 | refs/heads/master | 2022-12-02T01:43:01.349956 | 2020-08-03T08:51:47 | 2020-08-03T08:51:47 | 284,649,955 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import io
import os
parser = argparse.ArgumentParser(description='Parse feed columns and values of the first item')
parser.add_argument(
'path_to_file',
help='path to the feed file'
)
parser.add_argument(
'-d', '--delimiter',
help='delimiter symbol to split feed lines',
default=','
)
args = parser.parse_args()
def split_raw_line(raw_line):
item = []
i = 0
join_next = False
for _item in raw_line:
if _item.startswith('"'):
item.append(_item)
if _item.endswith('"'):
i += 1
else:
join_next = True
else:
if not join_next:
item.append(_item)
i += 1
else:
item[i] += _item
if _item.endswith('"'):
join_next = False
i += 1
return item
def get_output_file_name(input_file):
folder_path, file_name = os.path.split(input_file)
split_name = file_name.split('.')
split_name[-2] = split_name[-2] + '_parsed'
split_name[-1] = 'txt'
output_file = os.path.join(folder_path, '.'.join(split_name))
return output_file
if __name__ == '__main__':
file = args.path_to_file
delimiter = args.delimiter
output = get_output_file_name(file)
with io.open(file, 'r', encoding='utf-8') as feed:
columns = feed.readline().split(delimiter)
raw_item = feed.readline().split(delimiter)
item = split_raw_line(raw_item)
with io.open(output, 'w', encoding='utf-8') as output_file:
for c, i in zip(columns, item):
line = c + '\t' + i + '\n'
output_file.write(line)
| UTF-8 | Python | false | false | 1,723 | py | 3 | parse_feed.py | 2 | 0.535694 | 0.53047 | 0 | 62 | 26.790323 | 95 |
SCons/scons | 11,647,951,306,829 | c98772adb7dca7ba31ce8549745a04e3532f5ec8 | 110044654f706e920380dad2779bb32a77f1f26f | /SCons/Util/envs.py | 64e728a8ca15048ac2643bf6597855b3eb464587 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/SCons/scons | 89327bb9635cee6e7cc59249edca9cd859d7d1ff | b2a7d7066a2b854460a334a5fe737ea389655e6e | refs/heads/master | 2023-09-01T19:37:03.603772 | 2023-08-28T04:32:42 | 2023-08-28T04:32:42 | 104,670,160 | 1,827 | 342 | MIT | false | 2023-09-14T15:13:21 | 2017-09-24T19:23:46 | 2023-09-14T06:52:51 | 2023-09-14T15:13:21 | 24,244 | 1,774 | 287 | 656 | Python | false | false | # SPDX-License-Identifier: MIT
#
# Copyright The SCons Foundation
"""Various SCons utility functions
Routines for working with environments and construction variables
that don't need the specifics of Environment.
"""
import os
from types import MethodType, FunctionType
from typing import Union
from .types import is_List, is_Tuple, is_String
def PrependPath(
oldpath, newpath, sep=os.pathsep, delete_existing: bool=True, canonicalize=None
) -> Union[list, str]:
"""Prepend *newpath* path elements to *oldpath*.
Will only add any particular path once (leaving the first one it
encounters and ignoring the rest, to preserve path order), and will
:mod:`os.path.normpath` and :mod:`os.path.normcase` all paths to help
assure this. This can also handle the case where *oldpath*
is a list instead of a string, in which case a list will be returned
instead of a string. For example:
>>> p = PrependPath("/foo/bar:/foo", "/biz/boom:/foo")
>>> print(p)
/biz/boom:/foo:/foo/bar
If *delete_existing* is ``False``, then adding a path that exists will
not move it to the beginning; it will stay where it is in the list.
>>> p = PrependPath("/foo/bar:/foo", "/biz/boom:/foo", delete_existing=False)
>>> print(p)
/biz/boom:/foo/bar:/foo
If *canonicalize* is not ``None``, it is applied to each element of
*newpath* before use.
"""
orig = oldpath
is_list = True
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = False
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [newpath] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths = list(map(canonicalize, newpaths))
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and normpath not in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
return sep.join(paths)
def AppendPath(
oldpath, newpath, sep=os.pathsep, delete_existing: bool=True, canonicalize=None
) -> Union[list, str]:
"""Append *newpath* path elements to *oldpath*.
Will only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and will
:mod:`os.path.normpath` and :mod:`os.path.normcase` all paths to help
assure this. This can also handle the case where *oldpath*
is a list instead of a string, in which case a list will be returned
instead of a string. For example:
>>> p = AppendPath("/foo/bar:/foo", "/biz/boom:/foo")
>>> print(p)
/foo/bar:/biz/boom:/foo
If *delete_existing* is ``False``, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
>>> p = AppendPath("/foo/bar:/foo", "/biz/boom:/foo", delete_existing=False)
>>> print(p)
/foo/bar:/foo:/biz/boom
If *canonicalize* is not ``None``, it is applied to each element of
*newpath* before use.
"""
orig = oldpath
is_list = True
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = False
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [newpath] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths = list(map(canonicalize, newpaths))
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and normpath not in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
return sep.join(paths)
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep) -> None:
"""Add a path element to a construction variable.
`key` is looked up in `env_dict`, and `path` is added to it if it
is not already present. `env_dict[key]` is assumed to be in the
format of a PATH variable: a list of paths separated by `sep` tokens.
>>> env = {'PATH': '/bin:/usr/bin:/usr/local/bin'}
>>> AddPathIfNotExists(env, 'PATH', '/opt/bin')
>>> print(env['PATH'])
/opt/bin:/bin:/usr/bin:/usr/local/bin
"""
try:
is_list = True
paths = env_dict[key]
if not is_List(env_dict[key]):
paths = paths.split(sep)
is_list = False
if os.path.normcase(path) not in list(map(os.path.normcase, paths)):
paths = [path] + paths
if is_list:
env_dict[key] = paths
else:
env_dict[key] = sep.join(paths)
except KeyError:
env_dict[key] = path
class MethodWrapper:
"""A generic Wrapper class that associates a method with an object.
As part of creating this MethodWrapper object an attribute with the
specified name (by default, the name of the supplied method) is added
to the underlying object. When that new "method" is called, our
:meth:`__call__` method adds the object as the first argument, simulating
the Python behavior of supplying "self" on method calls.
We hang on to the name by which the method was added to the underlying
base class so that we can provide a method to "clone" ourselves onto
a new underlying object being copied (without which we wouldn't need
to save that info).
"""
def __init__(self, obj, method, name=None) -> None:
if name is None:
name = method.__name__
self.object = obj
self.method = method
self.name = name
setattr(self.object, name, self)
def __call__(self, *args, **kwargs):
nargs = (self.object,) + args
return self.method(*nargs, **kwargs)
def clone(self, new_object):
"""
Returns an object that re-binds the underlying "method" to
the specified new object.
"""
return self.__class__(new_object, self.method, self.name)
# The original idea for AddMethod() came from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# https://code.activestate.com/recipes/223613
#
# Changed as follows:
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
# * The instance/class detection is changed a bit, as it's all
# new-style classes now with Py3.
# * The by-hand construction of the function object from renamefunction()
# is not needed, the remaining bit is now used inline in AddMethod.
def AddMethod(obj, function, name=None) -> None:
"""Add a method to an object.
Adds *function* to *obj* if *obj* is a class object.
Adds *function* as a bound method if *obj* is an instance object.
If *obj* looks like an environment instance, use :class:`~SCons.Util.MethodWrapper`
to add it. If *name* is supplied it is used as the name of *function*.
Although this works for any class object, the intent as a public
API is to be used on Environment, to be able to add a method to all
construction environments; it is preferred to use ``env.AddMethod``
to add to an individual environment.
>>> class A:
... ...
>>> a = A()
>>> def f(self, x, y):
... self.z = x + y
>>> AddMethod(A, f, "add")
>>> a.add(2, 4)
>>> print(a.z)
6
>>> a.data = ['a', 'b', 'c', 'd', 'e', 'f']
>>> AddMethod(a, lambda self, i: self.data[i], "listIndex")
>>> print(a.listIndex(3))
d
"""
if name is None:
name = function.__name__
else:
# "rename"
function = FunctionType(
function.__code__, function.__globals__, name, function.__defaults__
)
if hasattr(obj, '__class__') and obj.__class__ is not type:
# obj is an instance, so it gets a bound method.
if hasattr(obj, "added_methods"):
method = MethodWrapper(obj, function, name)
obj.added_methods.append(method)
else:
method = MethodType(function, obj)
else:
# obj is a class
method = function
setattr(obj, name, method)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| UTF-8 | Python | false | false | 10,820 | py | 1,493 | envs.py | 718 | 0.61488 | 0.613494 | 0 | 325 | 32.292308 | 87 |
kochie/blog.kochie.io | 14,396,730,389,680 | e62d6df9520f3e607dd543f91d0a71f648c655ea | b83da7db77cf4aebcf05c990b5c258ec1b10ecba | /articles/08-s3-file-limit/test.py | ffb5befd53209bda7d1fd90fffa3541dc8a1253c | [
"MIT"
]
| permissive | https://github.com/kochie/blog.kochie.io | d45fc1452f2feff733b1458d761d5308ce7956d2 | f7ba69b3a81c1e94320995fb52cbe627e665f471 | refs/heads/main | 2023-08-17T14:11:17.410855 | 2023-08-13T22:57:37 | 2023-08-13T22:57:37 | 193,373,261 | 3 | 0 | MIT | false | 2023-08-13T22:57:38 | 2019-06-23T16:51:44 | 2023-07-20T05:19:57 | 2023-08-13T22:57:37 | 166,704 | 3 | 0 | 8 | TypeScript | false | false | if __name__ == "__main__":
for i in range(100000):
print(chr(i))
| UTF-8 | Python | false | false | 77 | py | 147 | test.py | 24 | 0.467532 | 0.38961 | 0 | 3 | 24.666667 | 27 |
olttwa/Closest-Pair-Of-Points-Multidimensional-Space | 1,494,648,628,152 | 21cc3521976227ee34df26d3802a0d1cf147b191 | a0bfb8852a3527dc954e68e2bf5f53f1f747e651 | /Closest Pair in Multidimensional Space/sort.py | 75478545efaee8232f3c057857beeae25acdb56f | []
| no_license | https://github.com/olttwa/Closest-Pair-Of-Points-Multidimensional-Space | 053060e5aee997812a3f327053521df746f4aa6c | be1ed7549616feacf22d068f0f7aaa98c6efb0b4 | refs/heads/master | 2021-01-22T12:57:45.855509 | 2016-05-25T17:18:36 | 2016-05-25T17:18:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#Use Quicksort to sort given collection by desired coordinate
def partition(alist, i):
count = 0
li = []
mid_pos = (len(alist)+1)//2
mid_element = alist[mid_pos-1]
if alist[0][i] < mid_element[i] < alist[-1][i] \
or alist[-1][i] < mid_element[i] < alist[0][i]:
alist[0], alist[mid_pos-1] = alist[mid_pos-1], alist[0]
if alist[0][i] < alist[-1][i] < mid_element[i] \
or mid_element[i] < alist[-1][i] < alist[0][i]:
alist[0], alist[-1] = alist[-1], alist[0]
start = 0
pivot = alist[start]
pindex = start+1
for j in range(start+1, len(alist)):
count = count + 1
if alist[j][i] < pivot[i]:
alist[j], alist[pindex] = alist[pindex], alist[j]
pindex += 1
li.append(count)
alist[start], alist[pindex-1] = alist[pindex-1], alist[start]
return alist[:pindex], alist[pindex:]
def quicksort(alist, i):
start = 0
end = len(alist)-1
if start<end:
left, right = partition(alist, i)
return quicksort( left[:-1], i) + \
quicksort([left[-1]], i) + \
quicksort(right, i)
else:
return alist | UTF-8 | Python | false | false | 1,186 | py | 5 | sort.py | 3 | 0.539629 | 0.512648 | 0 | 36 | 31.972222 | 65 |
psgpyc/SearchAlgoEmployee-Python | 515,396,125,364 | a89a4a13a0ba6c8b6728ae92fce0da2c51d6015e | 051a30c1181532006ecc8cb90706d6c215d864ee | /projectone/UsersInitialize.py | 616a5e4f45f9e158453d7835b3692a89ded8a695 | []
| no_license | https://github.com/psgpyc/SearchAlgoEmployee-Python | da8e7e020a7339699fe3cabcfbae1afa6bb38df1 | 977ffdd7c76f96763d27303232ee6672765fb812 | refs/heads/master | 2022-10-14T05:47:29.872866 | 2018-05-16T01:29:22 | 2018-05-16T01:29:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
This is a class containing core code for implementing CRUD.
'''
import psycopg2
global c , conn
try:
conn = psycopg2.connect("host='127.0.0.1' dbname='usersdb' user='postgres' password='nepal123'")
except:
raise Exception("error in credentials")
finally:
c = conn.cursor()
class UserInit:
@staticmethod
def initialize(col, value):
c.execute("select * from userdetails where {} = '{}'".format(col, value))
allrecords = c.fetchall()
# print(allrecords)
if len(allrecords) == 0:
print("SORRY NO RECORD FOUND")
# else:
# for i in allrecords:
# print(
# "NAME: {1} \nEmail:{2} \nPhone:{3} \nBio:{4} \nDOB:{5} \nGender:{6} \nAddress:{7} \nLongitude:{8} \nLatitude:{9} \nSocialMEdia:www.facebook.com/{10}".format(
# i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[8], i[9], i[10]))
return allrecords
@staticmethod
def initialize_del(id):
c.execute("select * from userdetails where id = '{0}'".format(id))
allrecords = c.fetchall()
if len(allrecords) == 0:
print("SORRY !! NO RECORD FOUND")
else:
try:
c.execute("delete from userdetails where id = '{0}'".format(id))
print("successfully deleted !!")
conn.commit()
except:
print("error")
conn.rollback()
@staticmethod
def initialize_update(col, old, new):
c.execute("select * from userdetails where {} = '{}'".format(col, old))
allrecords = c.fetchall()
if len(allrecords) == 0:
print("SORRY NO RECORD FOUND")
else:
query = "update userdetails set {0}='{1}' where {0} = '{2}'".format(col,
new,
old)
try:
c.execute(query)
print("sucessfully Updated !!!!")
except:
raise Exception("Error")
# conn.rollback()
conn.commit()
| UTF-8 | Python | false | false | 2,245 | py | 9 | UsersInitialize.py | 9 | 0.474388 | 0.455234 | 0 | 79 | 27.329114 | 195 |
mostafizcse/eCommerce | 11,871,289,628,226 | fb108ea3f4be6597ef39db1bcdc5a4ae3145302a | 979abbfd0d54581773d7b4d8755748992540dc34 | /mainApp/models.py | 824caf627dbb98e217f0084a60b3113603ce7e48 | []
| no_license | https://github.com/mostafizcse/eCommerce | db623c623eed8ceb3d236c93847dd3b3694b150b | 10fa5876af7bab37e8d418d7bc979b49013d3ff8 | refs/heads/master | 2022-12-10T13:42:42.286256 | 2019-02-02T06:14:00 | 2019-02-02T06:14:00 | 168,800,904 | 0 | 1 | null | false | 2022-11-22T02:37:44 | 2019-02-02T06:11:06 | 2019-02-02T06:27:36 | 2022-11-22T02:37:41 | 6,012 | 0 | 1 | 3 | HTML | false | false | from django.db import models
from django.utils import timezone
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=30)
icon = models.CharField(max_length=25, blank=True, null= True)
def __str__(self):
return self.name
class SubCategory(models.Model):
parent_category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='subcategory')
sub_name = models.CharField(max_length=35)
def __str__(self):
return self.sub_name
class Tag(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Product(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(default='slug')
category = models.ForeignKey(SubCategory, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
new_price = models.IntegerField()
old_price = models.IntegerField(blank=True)
description = models.TextField()
image = models.FileField(upload_to='product', blank=True, null=True)
def __str__(self):
return self.title
class ProductImages(models.Model):
productId = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='product_images')
image = models.FileField(upload_to='product')
def __str__(self):
return self.productId.title
class BannerSlider(models.Model):
productId = models.ForeignKey(Product, on_delete=models.CASCADE)
image = models.FileField(upload_to='BannerSlider')
subTitle = models.CharField(max_length=200)
title = models.CharField(max_length=150)
details = models.TextField()
posted_on = models.DateTimeField(auto_now=False, auto_now_add=False, default=timezone.now)
def __str__(self):
return self.title
class HotDeal(models.Model):
productId = models.ForeignKey(Product, on_delete=models.CASCADE)
offer = models.CharField(max_length=10)
deal_time = models.DateTimeField(auto_now=False, auto_now_add=False, default=timezone.now)
def __str__(self):
return self.productId.title
class TodayDeal(models.Model):
productId = models.ForeignKey(Product, on_delete=models.CASCADE)
offer = models.CharField(max_length=10)
deal_time = models.DateTimeField(auto_now=False, auto_now_add=False, default=timezone.now)
def __str__(self):
return self.productId.title
class SpeacialDeal(models.Model):
productId = models.ForeignKey(Product, on_delete=models.CASCADE)
def __str__(self):
return self.productId.title | UTF-8 | Python | false | false | 2,564 | py | 41 | models.py | 30 | 0.706708 | 0.698518 | 0 | 71 | 35.126761 | 103 |
timewaitsfor/LeetCode | 2,929,167,705,900 | f69c36724616c40a7e7012cf56792ed72c6a991d | fca6f2bdf93db14de4868d4614091830cf22c083 | /TitleNumberOrder/389_findTheDifference.py | ba0a517d7d255cb0ad2ed7cb6c0a158acb3d247b | []
| no_license | https://github.com/timewaitsfor/LeetCode | 8627fd9b4a72b51fe31c726c66ed95a899a89367 | 54a1be77c59bedc424265c2b2e0313a689ce754c | refs/heads/master | 2023-07-11T01:07:11.143621 | 2021-08-19T06:05:18 | 2021-08-19T06:05:18 | 285,533,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def findTheDifference(self, s: str, t: str) -> str:
memo = {}
for c in t:
if c not in memo:
memo[c] = 1
else:
memo[c] += 1
for cc in s:
memo[cc] -= 1
res = ""
for ccc in memo:
if memo[ccc] >= 1:
res += ccc
return res
s = "ae"
t = "aea"
solution = Solution()
res = solution.findTheDifference(s, t) | UTF-8 | Python | false | false | 466 | py | 69 | 389_findTheDifference.py | 68 | 0.418455 | 0.409871 | 0 | 25 | 17.68 | 55 |
meichenl95/SH_TOMO | 7,962,869,376,409 | 9a69b4deb9a8bb2d03429387db2036d69109b1ff | 8cebbd1e96053876a1bbe60b958ed53f96bbe905 | /USArray/plot_seismograms.py | da94b7ec566bbb7cbf5ccf3adc404b954d901730 | []
| no_license | https://github.com/meichenl95/SH_TOMO | 52e8fb4ee9a0ad5264e045b2658ac35d7913db1a | d7751b38ab4bb2a208c3f907bec046c61fd527fc | refs/heads/master | 2023-07-18T18:04:21.333165 | 2021-09-17T16:03:50 | 2021-09-17T16:03:50 | 407,599,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/home/meichen/anaconda3/bin/python
def plot_seismograms_gcarc(**kwargs):
    """Plot a record section of seismograms with distance (gcarc, deg) on the y axis.

    Each trace is cut to a time window around an alignment time read from its
    SAC header, optionally normalized by its peak-to-peak amplitude, and drawn
    at a vertical offset equal to its epicentral distance. A red dot marks the
    alignment time on each trace. Created by Meichen Liu on June 29th, 2019.

    Keyword Args:
        dirname (str): directory where the seismograms are stored.
        filename (str): name (or glob pattern) of the waveform file(s).
        align (str): SAC header field holding the alignment time, e.g. 'o'
            (origin time) or 'tn' (a picked phase time).
        phase (str): phase name, used only in the x-axis label.
        cut_b (float): seconds before the alignment time (positive if before).
        cut_a (float): seconds after the alignment time.
        odirname (str): directory in which to save the output figure.
        ofile (str): name of the output figure file.
        normalize (str): "True" to normalize each trace by its peak-to-peak
            amplitude, "False" to apply a fixed 2e-4 scaling.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import obspy
    import os

    dirname = kwargs.get('dirname')
    filename = kwargs.get('filename')
    align = kwargs.get('align')
    phase = kwargs.get('phase')
    cut_b = kwargs.get('cut_b')
    cut_a = kwargs.get('cut_a')
    odirname = kwargs.get('odirname')
    ofile = kwargs.get('ofile')
    normalize = kwargs.get('normalize')

    os.chdir('{}'.format(dirname))
    fig, ax = plt.subplots(1, 1, figsize=[5, 8])
    st = obspy.read('{}'.format(filename))
    for tr in st:
        # BUGFIX: a missing SAC header key raises (KeyError/AttributeError)
        # rather than returning '' — the original `align_time == ''` guard
        # could never trigger for float header values.
        try:
            align_time = tr.stats.sac[align]
        except (KeyError, AttributeError):
            print('No {} in the header file of {}'.format(align, tr.id))
            continue
        start = tr.stats.sac['b']
        gcarc = tr.stats.sac['gcarc']
        delta = tr.stats.sac['delta']
        N1 = int((-1 * start + align_time - cut_b) / delta)
        N2 = int((-1 * start + align_time + cut_a) / delta)
        # BUGFIX: check BOTH window edges against the trace extent. The
        # original only compared `start` to the align time, so a window
        # beginning before the trace or running past its end was silently
        # truncated by slicing, which stretched the linspace time axis.
        if N1 < 0 or N2 > len(tr.data):
            print("Time window exceeds the range of {}.".format(tr.id))
            continue
        t = np.linspace(-1 * cut_b, cut_a, num=N2 - N1)
        # Sample index of the alignment time itself (red marker position).
        i_align = int((-1 * start + align_time) / delta)
        if normalize == "True":
            # Hoist the peak-to-peak amplitude; the original recomputed it
            # four times per trace.
            ptp = max(tr.data[N1:N2]) - min(tr.data[N1:N2])
            if ptp != 0:  # skip flat (all-equal) traces to avoid divide-by-zero
                ax.plot(t, tr.data[N1:N2] / ptp * 2 + gcarc,
                        lw=0.4, c='k', alpha=0.5)
                ax.scatter(0, tr.data[i_align] / ptp * 2 + gcarc,
                           marker='o', color='red', s=0.5)
        elif normalize == "False":
            ax.plot(t, tr.data[N1:N2] * 2 * 1e-4 + gcarc, lw=0.4, c='k', alpha=0.3)
            ax.scatter(0, tr.data[i_align] * 2 * 1e-4 + gcarc,
                       marker='o', color='red', s=0.5)
        else:
            print("Please specify normalize option: True or False!")
    ax.set_xlim([-1 * cut_b - 10, cut_a + 10])
    ax.set_ylim([25, 95])
    ax.set_xlabel('Time (s) aligned to {}'.format(phase))
    ax.set_ylabel('Distance (deg)')
    plt.savefig('{}/{}'.format(odirname, ofile))
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
def plot_seismograms_az(**kwargs):
    """Plot a record section of seismograms stacked by source-receiver azimuth.

    Keyword Args:
        dirname (str): directory containing the SAC seismograms.
        filename (str): file pattern of the traces to read.
        align (str): SAC header field holding the alignment time
            ('o' for origin time, 't2' etc. for a picked phase time).
        phase (str): phase name used only for the x-axis label.
        cut_b (float): seconds before the align time (positive if before).
        cut_a (float): seconds after the align time.
        odirname (str): directory where the output figure is saved.
        ofile (str): name of the output figure file.
        normalize (str): "True" to normalize each trace to its peak-to-peak
            amplitude, "False" to apply a fixed scale.

    Side effects:
        Changes the working directory to ``dirname`` and writes the figure
        to ``odirname/ofile``.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import obspy
    import os

    dirname = kwargs.get('dirname')
    filename = kwargs.get('filename')
    align = kwargs.get('align')
    phase = kwargs.get('phase')
    cut_b = kwargs.get('cut_b')
    cut_a = kwargs.get('cut_a')
    odirname = kwargs.get('odirname')
    ofile = kwargs.get('ofile')
    normalize = kwargs.get('normalize')

    os.chdir('{}'.format(dirname))
    fig, ax = plt.subplots(1, 1, figsize=[5, 8])
    stream = obspy.read('{}'.format(filename))
    for trace in stream:
        align_time = trace.stats.sac[align]
        begin = trace.stats.sac['b']
        azimuth = trace.stats.sac['az']
        dt = trace.stats.sac['delta']
        # Skip traces whose header lacks the alignment time.
        if align_time == '':
            print('No {} in the header file of {}'.format(align, trace.id))
            continue
        # Skip traces whose recording starts after the alignment time.
        if begin > align_time:
            print("Time window exceeds the range of {}.".format(trace.id))
            continue
        # Sample indices of the cut window and of the alignment instant.
        i_lo = int((align_time - begin - cut_b) / dt)
        i_hi = int((align_time - begin + cut_a) / dt)
        i_align = int((align_time - begin) / dt)
        segment = trace.data[i_lo:i_hi]
        t_axis = np.linspace(-1 * cut_b, cut_a, num=len(segment))
        if normalize == "True":
            span = max(segment) - min(segment)
            # A flat trace cannot be normalized; silently skip it.
            if span != 0:
                ax.plot(t_axis, segment / span * 8 + azimuth,
                        lw=0.4, c='k', alpha=0.5)
                ax.scatter(0, trace.data[i_align] / span * 8 + azimuth,
                           marker='o', color='red', s=0.5)
        elif normalize == "False":
            ax.plot(t_axis, segment * 8 * 1e-4 + azimuth,
                    lw=0.4, c='k', alpha=0.3)
            ax.scatter(0, trace.data[i_align] * 8 * 1e-4 + azimuth,
                       marker='o', color='red', s=0.5)
        else:
            print("Please specify normalize option: True or False!")
    ax.set_xlim([-1 * cut_b - 10, cut_a + 10])
    ax.set_ylim([-10, 370])
    ax.set_xlabel('Time (s) aligned to {}'.format(phase))
    ax.set_ylabel('Azimuth (deg)')
    plt.savefig('{}/{}'.format(odirname, ofile))
def main():
    """Produce record sections for the first catalogued event.

    Reads the event catalogue from ``events_cat.txt`` under
    ``current_path`` and, for each selected event id, writes four figures
    (distance- and azimuth-sorted, raw and normalized) into
    ``current_path``.
    """
    import numpy as np
    import pandas as pd

    # Project layout: catalogue and output figures on the research disk,
    # waveforms on the work disk.
    current_path = '/home/meichen/Research/SH_TOMO/USArray'
    path = '/home/meichen/work1/SH_TOMO/events'
    events_cat = pd.read_csv('{}/events_cat.txt'.format(current_path),skipinitialspace=True,header=None,sep=' ')
    events_cat = np.array(events_cat)
    # NOTE(review): only the first event ([0:1, 0]) is processed —
    # presumably a debugging restriction; confirm before batch runs.
    for eventid in events_cat[0:1,0]:
        plot_seismograms_gcarc(dirname='{}/event_{}/waveforms/SAC_files'.format(path,eventid),filename='*BHZ*.markP',align='t2',phase='P',cut_b=50,cut_a=200,odirname=current_path,ofile='event_{}_gcarc.pdf'.format(eventid),normalize='False')
        plot_seismograms_gcarc(dirname='{}/event_{}/waveforms/SAC_files'.format(path,eventid),filename='*BHZ*.markP',align='t2',phase='P',cut_b=50,cut_a=200,odirname=current_path,ofile='event_{}_gcarc_nor.pdf'.format(eventid),normalize='True')
        plot_seismograms_az(dirname='{}/event_{}/waveforms/SAC_files'.format(path,eventid),filename='*BHZ*.markP',align='t2',phase='P',cut_b=50,cut_a=200,odirname=current_path,ofile='event_{}_az.pdf'.format(eventid),normalize='False')
        plot_seismograms_az(dirname='{}/event_{}/waveforms/SAC_files'.format(path,eventid),filename='*BHZ*.markP',align='t2',phase='P',cut_b=50,cut_a=200,odirname=current_path,ofile='event_{}_az_nor.pdf'.format(eventid),normalize='True')


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 6,791 | py | 861 | plot_seismograms.py | 23 | 0.606391 | 0.582241 | 0 | 166 | 39.909639 | 243 |
qubearfarmer/Quantum_optics | 9,320,079,032,953 | a1eafd14c51b77d0303f855de9c67e8ded3094f6 | 97a288e2debdea256af56617e75d17d84ca2de17 | /Pulse_width.py | e82a915f541f2a25c14ef8d90de7068da9b3f347 | []
| no_license | https://github.com/qubearfarmer/Quantum_optics | bb9dbdb6285b36cbb92dd90de2e13ddd901e7a2f | fad370f6879e00596110bd2932c1bd958a74b918 | refs/heads/master | 2023-02-05T04:44:44.192985 | 2021-06-18T23:17:27 | 2021-06-18T23:17:27 | 68,663,235 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from matplotlib import pyplot as plt
def gaussian_shape(x, sigma, T_gate):
    """Gaussian pulse envelope truncated to the gate window ``[0, T_gate]``.

    The Gaussian width is ``sigma * T_gate`` and its peak sits at twice
    that width; samples with ``t > T_gate`` stay zero.
    """
    width = sigma * T_gate
    center = 2 * width
    samples = np.asarray(x)
    envelope = np.zeros_like(samples)
    inside = samples <= T_gate
    envelope[inside] = np.exp(-0.5 * (samples[inside] - center) ** 2 / width ** 2)
    return envelope
def gaussian_haonan(x, width):
    """Zero-based Gaussian envelope of total length ``2 * width``.

    The Gaussian std is ``width / sqrt(2*pi)``.  The curve is shifted so
    it starts and ends at exactly zero and rescaled so its peak (at
    ``t = width``) is exactly one; samples beyond ``2 * width`` are zero.
    """
    std = width / np.sqrt(2 * np.pi)
    samples = np.asarray(x)
    shape = np.zeros_like(samples)
    # Value of the raw Gaussian at the window edges; subtracted off and
    # compensated for so the pulse touches zero at both ends.
    floor = np.exp(-width ** 2 / (2 * std ** 2))
    gain = 1 / (1 - floor)
    active = samples <= 2 * width
    shape[active] = (np.exp(-(samples[active] - width) ** 2 / (2 * std ** 2)) - floor) * gain
    return shape
def gaussian_flattop_haonan(x, width, T_flat):
    """Flat-top envelope: Gaussian edges of length ``width`` joined by a
    plateau of length ``T_flat``; zero beyond ``2 * width + T_flat``.

    The edges use std ``width / sqrt(2*pi)`` and are offset/rescaled so
    the pulse starts and ends at exactly zero with a plateau of one.
    """
    std = width / np.sqrt(2 * np.pi)
    samples = np.asarray(x)
    shape = np.zeros_like(samples)
    floor = np.exp(-width ** 2 / (2 * std ** 2))
    gain = 1 / (1 - floor)
    rise = samples <= width
    flat = (samples > width) & (samples < width + T_flat)
    fall = (samples >= width + T_flat) & (samples <= 2 * width + T_flat)
    shape[rise] = (np.exp(-(samples[rise] - width) ** 2 / (2 * std ** 2)) - floor) * gain
    shape[flat] = 1
    shape[fall] = (np.exp(-(samples[fall] - width - T_flat) ** 2 / (2 * std ** 2)) - floor) * gain
    return shape
def gaussian_flattop(x, sigma, T_edge, T_gate):
    """Flat-top pulse with Gaussian edges over the gate ``[0, T_gate]``.

    ``sigma`` is the edge width as a fraction of ``T_edge``; each
    half-Gaussian edge occupies ``T_edge / 2`` and the plateau fills the
    remaining ``T_gate - T_edge``.  Samples beyond ``T_gate`` are zero.
    """
    width = sigma * T_edge
    T_flat = T_gate - T_edge
    t_0 = sigma ** -1 * 0.5 * width  # half of the edge duration
    samples = np.asarray(x)
    pulse = np.zeros_like(samples)
    rise = samples <= t_0
    flat = (samples > t_0) & (samples <= T_gate - t_0)
    fall = (samples > T_gate - t_0) & (samples <= T_gate)
    pulse[rise] = np.exp(-0.5 * (samples[rise] - t_0) ** 2 / width ** 2)
    pulse[flat] = 1
    pulse[fall] = np.exp(-0.5 * (samples[fall] - (t_0 + T_flat)) ** 2 / width ** 2)
    return pulse
def drag_gaussian(x, sigma, T_gate, beta):
    """Analytic DRAG (derivative) quadrature of the truncated Gaussian.

    Returns the time-derivative of ``gaussian_shape`` inside the gate
    window ``[0, T_gate]`` and zero afterwards.

    NOTE(review): ``beta`` is accepted but never applied here — callers
    appear to scale externally (cf. ``drag_gaussian1``); confirm this
    asymmetry is intentional.
    """
    width = sigma * T_gate
    center = 2 * width
    samples = np.asarray(x)
    quad = np.zeros_like(samples)
    inside = samples <= T_gate
    quad[inside] = (-np.exp(-0.5 * (samples[inside] - center) ** 2 / width ** 2)
                    * (samples[inside] - center) / width ** 2)
    return quad
def drag_gaussian1(x, sigma, T_gate, beta):
    """DRAG quadrature of the truncated Gaussian, scaled by ``beta``.

    Same derivative as ``drag_gaussian`` but multiplied by the DRAG
    coefficient ``beta`` before returning.
    """
    width = sigma * T_gate
    center = 2 * width
    samples = np.asarray(x)
    quad = np.zeros_like(samples)
    inside = samples <= T_gate
    quad[inside] = (-np.exp(-0.5 * (samples[inside] - center) ** 2 / width ** 2)
                    * (samples[inside] - center) / width ** 2)
    return quad * beta
def drag_gaussian_flattop(x, sigma, T_edge, T_gate, beta):
    """Numerical DRAG quadrature of the flat-top envelope.

    Takes ``beta`` times the finite-difference gradient of
    ``gaussian_flattop`` and forces every sample at or beyond ``T_gate``
    to zero.
    """
    envelope = gaussian_flattop(x, sigma, T_edge, T_gate)
    quad = beta * np.gradient(envelope)
    quad[np.asarray(x) >= T_gate] = 0
    return quad
def drag_gaussian_flattop1(x, sigma, T_edge, T_gate, beta):
    """Analytic DRAG quadrature of the flat-top envelope, scaled by ``beta``.

    Non-zero only on the Gaussian edges; the plateau and everything
    outside the gate ``[0, T_gate]`` contribute zero.

    NOTE(review): the rescale offset uses ``exp(-width**2 / (2*sigma**2))``
    with the *fractional* ``sigma`` (the ``_haonan`` variants use a derived
    std instead) — for typical arguments this is ~0 so the rescale is a
    no-op; confirm whether a derived std was intended.
    """
    width = sigma * T_edge
    T_flat = T_gate - T_edge
    t_0 = sigma ** -1 * 0.5 * width  # half of the edge duration
    samples = np.asarray(x)
    quad = np.zeros_like(samples)
    floor = np.exp(-width ** 2 / (2 * sigma ** 2))
    gain = 1 / (1 - floor)
    rise = samples <= t_0
    fall = (samples > T_gate - t_0) & (samples <= T_gate)
    quad[rise] = (-gain * np.exp(-0.5 * (samples[rise] - t_0) ** 2 / width ** 2)
                  * (samples[rise] - t_0) / width ** 2)
    quad[fall] = (-gain * np.exp(-0.5 * (samples[fall] - (t_0 + T_flat)) ** 2 / width ** 2)
                  * (samples[fall] - (t_0 + T_flat)) / width ** 2)
    return quad * beta
def drag_gaussian_flattop_haonan(x, width, T_flat, alpha):
    """In-phase and DRAG quadratures of the flat-top Haonan envelope.

    Returns ``(xi_x, xi_y)`` where ``xi_x`` is the rescaled flat-top
    envelope (edges of length ``width``, plateau of length ``T_flat``)
    and ``xi_y`` is ``alpha`` times its analytic derivative, non-zero
    only on the Gaussian edges.
    """
    std = width / np.sqrt(2 * np.pi)
    samples = np.asarray(x)
    xi_x = np.zeros_like(samples)
    xi_y = np.zeros_like(samples)
    floor = np.exp(-width ** 2 / (2 * std ** 2))
    gain = 1.0 / (1.0 - floor)
    rise = samples <= width
    flat = (samples > width) & (samples < width + T_flat)
    fall = (samples >= width + T_flat) & (samples <= 2 * width + T_flat)
    d_rise = samples[rise] - width
    d_fall = samples[fall] - width - T_flat
    xi_x[rise] = (np.exp(-0.5 * d_rise ** 2 / std ** 2) - floor) * gain
    xi_y[rise] = (-np.exp(-0.5 * d_rise ** 2 / std ** 2) * d_rise / std ** 2) * gain
    xi_x[flat] = 1
    xi_x[fall] = (np.exp(-0.5 * d_fall ** 2 / std ** 2) - floor) * gain
    xi_y[fall] = (-np.exp(-0.5 * d_fall ** 2 / std ** 2) * d_fall / std ** 2) * gain
    return xi_x, alpha * xi_y
# ---------------------------------------------------------------------------
# Script: compare the analytic flat-top DRAG pulse against a pulse envelope
# rebuilt "Haonan style" from two overlapping Gaussian halves, plotting the
# envelopes and their (scaled) gradients on one figure.
# ---------------------------------------------------------------------------
x = np.linspace(0,200,201)  # time axis, 1 sample per unit over [0, 200]
# y = np.linspace (1000, 1500, 101)
# print (y[np.where(300>x>200)])
freq = 5  # carrier frequency for the (commented-out) modulated-pulse plots
# plt.plot(x, gaussian_shape(x, 0.25, 40), linewidth = 2.0)
# plt.plot(x, gaussian_shape(x, 0.25, 80)*np.cos(2*np.pi*freq*x))
# plt.plot(x, gaussian_haonan(x, 10), linewidth = 2.0)
# plt.plot(x, gaussian_flattop(x, 0.25, 40, 100))
# plt.plot(x, gaussian_flattop_haonan(x, 10, 50))
# plt.plot(x, drag_gaussian(x, 0.25, 40, 1)/3)
# plt.plot(x, np.gradient(gaussian_shape(x, 0.25, 40)))
# plt.plot(x, drag_gaussian(x, 0.25, 40, 1)/np.gradient(gaussian_shape(x, 0.25, 40)))
# plt.plot(x, gaussian_shape(x, 0.25, 80))
# plt.plot(x[1:], np.diff(gaussian_shape(x, 0.25, 80)))
# plt.plot(x, drag_gaussian1(x, 0.25, 80, 1.9))
# plt.plot(x, drag_gaussian_flattop(x, 0.25, 80, 200, 1.9))
# plt.plot(x, drag_gaussian_flattop1(x, 0.25, 40, 100, 1))
# Analytic flat-top pulse (width 20, plateau 70, DRAG coefficient 1.9):
# envelope, analytic DRAG quadrature, and numeric gradient for comparison.
plt.plot(x, drag_gaussian_flattop_haonan(x, 20, 70, 1.9)[0], label = 'Long pulse')
plt.plot(x, drag_gaussian_flattop_haonan(x, 20, 70, 1.9)[1], label = 'Long DRAG')
plt.plot(x, 1.9*np.gradient(drag_gaussian_flattop_haonan(x, 20, 70, 1.9)[0]), label = 'Long gradient')
# plt.tick_params(labelsize = 18)
# --- Rebuild the same flat-top envelope from first principles ("Haonan") ---
t = np.linspace(0, 200, 201)      # independent time axis for the rebuild
num_t = len(t)
t_step = t[1] - t[0]              # sample spacing (unused below)
t0_ind = int(num_t / 2)
t0 = t[t0_ind]                    # pulse center
width = 20                        # Gaussian edge duration
truncation_range = 2              # edges are truncated at +/- 1 width each
plateau = 70                      # flat-top duration
total_duration = truncation_range * width + plateau
std = width / np.sqrt(2 * np.pi)  # edge std, same convention as *_haonan
# Falling half: Gaussian beyond the plateau's right end, clamped to 1 before.
values1 = np.exp(-(t - t0 - plateau / 2) ** 2 / (2 * std ** 2))
values1[t < (t0 + plateau / 2)] = 1
# Rising half: Gaussian before the plateau's left end, clamped to 1 after.
values2 = np.exp(-(t - t0 + plateau / 2) ** 2 / (2 * std ** 2))
values2[t > (t0 - plateau / 2)] = 1
values = values1 * values2        # product combines both edges and plateau
# plateau_ind = (t > (t0 - plateau / 2)) * (t < (t0 + plateau / 2))
# values[plateau_ind] = values.max()
# plt.plot(t, np.exp(-(t - t0 - plateau / 2) ** 2 / (2 * std ** 2)))
# Truncate outside the total pulse duration.
values[t < (t0 - total_duration / 2)] = 0
values[t > (t0 + total_duration / 2)] = 0
# Shift so the truncated ends touch zero, then re-truncate and renormalize.
non_zero_value = values[values != 0]
values = values - non_zero_value.min()
values[t < (t0 - total_duration / 2)] = 0
values[t > (t0 + total_duration / 2)] = 0
pulse_envelope = values / values.max()
plt.plot(t,pulse_envelope, label = 'Haonan pulse')
plt.plot(t,1.9*np.gradient(pulse_envelope), label = 'Haonan gradient')
plt.legend()
plt.show()
bh107/bohrium | 17,411,797,444,022 | 8151fdc8582119bcfe4ed284f352703cae4f2974 | a227da58e9c3db9c35b69f430f5c1131fd18def1 | /ipython-magic.py | b0da90c7fedff5defc1721f684f37398112ede2c | [
"Apache-2.0"
]
| permissive | https://github.com/bh107/bohrium | cb8eb237b1b1a39048f58434cf16103917952533 | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | refs/heads/master | 2021-05-22T03:25:21.004506 | 2020-11-16T18:59:31 | 2020-11-16T18:59:31 | 33,190,852 | 240 | 43 | Apache-2.0 | false | 2020-11-16T18:59:32 | 2015-03-31T14:43:32 | 2020-10-14T12:54:55 | 2020-11-16T18:59:32 | 33,997 | 202 | 33 | 45 | C++ | false | false | ####################################
# This file was created by Bohrium.
# It allows you to run NumPy code (cells) as Bohrium, by using the magic command
# `%%bohrium` in your cells, e.g.:
#
# %%bohrium
# print(numpy)
# print(numpy.arange(10))
####################################
from IPython.core.magic import register_cell_magic
try:
    import bohrium
    have_bohrium = True

    @bohrium.replace_numpy
    def execute(__code):
        """Run *__code* under Bohrium and publish its variables globally.

        The cell is executed with ``locals()`` as its namespace; every
        name it defines (except our own bookkeeping) is then copied into
        ``globals()`` so later cells can see it.
        """
        exec(__code, globals(), locals())
        # Internal names that must not leak into the notebook namespace.
        __excludes = set(["__excludes", "__code", "np", "bohrium"])
        # dict.items() exists on both Python 2 and 3, so the old
        # iteritems()/items() duplication (guarded by a bare ``except:``
        # that could mask real errors) is unnecessary.
        for key, value in locals().items():
            if key not in __excludes:
                globals()[key] = value
except ImportError:
    warning_shown = False  # warn only once about the missing module

    def execute(__code):
        """Fallback: run *__code* with plain NumPy, warning once."""
        global warning_shown
        if not warning_shown:
            print("WARNING: Module bohrium could not be imported.\n"
                  " The magic command '%%bohrium' will have no effect.")
            warning_shown = True
        exec(__code, globals())
@register_cell_magic
def bohrium(line, cell):
    """Cell magic: run the cell body through ``execute`` (Bohrium when
    available, plain NumPy otherwise)."""
    # exec requires the code block to end with a newline.
    source = cell
    if not source.endswith("\n"):
        source += "\n"
    execute(source)
| UTF-8 | Python | false | false | 1,459 | py | 396 | ipython-magic.py | 221 | 0.533927 | 0.531186 | 0 | 51 | 27.607843 | 80 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.