Dataset schema (column dtype and value range; ⌀ marks columns that contain nulls):

| Column | Dtype / range |
|---|---|
| repo_name | stringlengths 7–111 |
| __id__ | int64 16.6k–19,705B |
| blob_id | stringlengths 40–40 |
| directory_id | stringlengths 40–40 |
| path | stringlengths 5–151 |
| content_id | stringlengths 40–40 |
| detected_licenses | list |
| license_type | stringclasses 2 values |
| repo_url | stringlengths 26–130 |
| snapshot_id | stringlengths 40–40 |
| revision_id | stringlengths 40–40 |
| branch_name | stringlengths 4–42 |
| visit_date | timestamp[ns] |
| revision_date | timestamp[ns] |
| committer_date | timestamp[ns] |
| github_id | int64 14.6k–687M ⌀ |
| star_events_count | int64 0–209k |
| fork_events_count | int64 0–110k |
| gha_license_id | stringclasses 12 values |
| gha_fork | bool 2 classes |
| gha_event_created_at | timestamp[ns] |
| gha_created_at | timestamp[ns] |
| gha_updated_at | timestamp[ns] |
| gha_pushed_at | timestamp[ns] |
| gha_size | int64 0–10.2M ⌀ |
| gha_stargazers_count | int32 0–178k ⌀ |
| gha_forks_count | int32 0–88.9k ⌀ |
| gha_open_issues_count | int32 0–2.72k ⌀ |
| gha_language | stringlengths 1–16 ⌀ |
| gha_archived | bool 1 class |
| gha_disabled | bool 1 class |
| content | stringlengths 10–2.95M |
| src_encoding | stringclasses 5 values |
| language | stringclasses 1 value |
| is_vendor | bool 2 classes |
| is_generated | bool 2 classes |
| length_bytes | int64 10–2.95M |
| extension | stringclasses 19 values |
| num_repo_files | int64 1–202k |
| filename | stringlengths 4–112 |
| num_lang_files | int64 1–202k |
| alphanum_fraction | float64 0.26–0.89 |
| alpha_fraction | float64 0.2–0.89 |
| hex_fraction | float64 0–0.09 |
| num_lines | int32 1–93.6k |
| avg_line_length | float64 4.57–103 |
| max_line_length | int64 7–931 |

The rows below follow this column order; each row's `content` field holds the full source file.
boatpand/coronadailyth | 12,481,174,965,010 | a59fdeea72fd77391fe4f3ef2dc922d4eaa515f5 | d6a9c305401e8c2b62b980fbdb7ef71fbd490303 | /app.py | f8fffaef8e47b4cc3d2ba2056919e51cea5e5357 | []
| no_license | https://github.com/boatpand/coronadailyth | a2628f061973d806279d816f05d5af704d5d20d8 | ff0288188fb2587fa2403b32b01dd7641368dbb5 | refs/heads/main | 2023-06-05T20:15:51.485621 | 2021-06-21T06:52:48 | 2021-06-21T06:52:48 | 378,802,845 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
import requests
import random
import datetime
import schedule
app = Flask(__name__)
URL = "https://corona.lmao.ninja/v2/countries/THA"
response = requests.get(URL)
data = response.json()
# line webhook : https://coronadailyth.herokuapp.com/webhook
line_bot_api = LineBotApi('RfDEGr9qfOkcWQ6zB4kQMdjuq8d7F+4GK1rGS4Ncx5j8uzGoiqPgm6II3WeC07q4mo7oJDyl5BV10KOi4PUVboYQS3VOSAF4CdWqt8Gb91xCdO/ntyiMasuBL1gp2CMKjtxVvxOCWWaWtC3FmBBKNAdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('60155d582adb7f366c127dd4237c5ac7')
####################### new ########################
@app.route('/')
def index():
return "Hello world!"
@app.route('/webhook', methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
now = datetime.datetime.now() + datetime.timedelta(0.291667)
date = now.strftime("%d/%m/%Y")
t = now.strftime("%X")
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
if event.message.text == "covid":
msg0 = "ข้อมูลประจำวันที่ " + str(date) + " เวลา " + str(t) + '\n'
msg1 = "ผู้ติดเชื้อวันนี้ " + str(data["todayCases"]) + " ราย" + "\n"
msg2 = "ผู้ติดเชื้อสะสม " + str(data["cases"]) + " ราย" + "\n"
msg3 = "ผู้เสียชีวิตวันนี้ " + str(data["todayDeaths"]) + " ราย" + "\n"
msg4 = "ผู้เสียชีวิตสะสม " + str(data["deaths"]) + " ราย" + "\n"
msg5 = "หายป่วยวันนี้ " + str(data["todayRecovered"]) + " ราย" + "\n"
msg6 = "หายป่วยสะสม " + str(data["recovered"]) + " ราย"
msg = msg0 + msg1 + msg2 + msg3 + msg4 + msg5 + msg6
line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
else:
msg1 = "สถานการณ์ covid-19 โดย กรมควบคุมโรค" + "\n" + "https://ddc.moph.go.th/viralpneumonia/index.php"
msg2 = "วิธีป้องกัน covid-19 โดย โรงพยาบาลศิครินทร์" + '\n' + "https://www.sikarin.com/content/detail/408/%E0%B8%A7%E0%B8%B4%E0%B8%98%E0%B8%B5%E0%B8%81%E0%B8%B2%E0%B8%A3%E0%B8%9B%E0%B9%89%E0%B8%AD%E0%B8%87%E0%B8%81%E0%B8%B1%E0%B8%99-%E0%B8%A3%E0%B8%B1%E0%B8%9A%E0%B8%A1%E0%B8%B7%E0%B8%AD-%E0%B9%84%E0%B8%A7%E0%B8%A3%E0%B8%B1%E0%B8%AA-covid-19"
msg3 = "อาการ covid-19 โดย Kapook!" + "\n" + "https://covid-19.kapook.com/view224756.html"
msg4 = "รู้ให้ชัดก่อนฉีดวัคซีน COVID-19" + "\n" + "https://www.bangkokhospital.com/content/know-well-before-getting-the-covid-19-vaccine"
msg5 = 'หากต้องการทราบรายละเอียดผู้ติดเชื้อวันนี้พิมพ์ "covid" หรือกด menu "covid"'
msg6 = 'หากต้องการทราบรายงานความก้าวหน้าการให้บริการฉีดวัคซีนโควิด 19 กด menu "vaccine"'
mylist = [msg1,msg2,msg3,msg4,msg5,msg6]
msg = random.choice(mylist)
line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
if __name__ == '__main__':
    app.run(debug=True)
| UTF-8 | Python | false | false | 4,050 | py | 4 | app.py | 1 | 0.644287 | 0.586572 | 0 | 80 | 41.4625 | 351 |
WoefulWolf/NieR2Blender_2.8 | 10,204,842,339,625 | 22951ed6361e18779f399eb0b2a41f5801e184a7 | d0fb36f4ecf10c5de1ad23d09af93cac50bf2521 | /dat_unpacker.py | b9a4ecb137ce370815a298573085c944cff900fe | []
| no_license | https://github.com/WoefulWolf/NieR2Blender_2.8 | 474777266425945d174641f25e2cd319f17d5718 | 8f3d6360baa287ad4ed7d0d486c5cb06723e1e94 | refs/heads/master | 2020-07-18T22:05:50.184706 | 2020-06-19T09:19:50 | 2020-06-19T09:19:50 | 206,321,535 | 9 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding = utf-8
import os
import sys
import struct
from .util import to_int
def little_endian_to_float(bs):
return struct.unpack("<f", bs)[0]
def little_endian_to_int(bs):
return int.from_bytes(bs, byteorder='little')
def create_dir(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def read_header(fp):
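    # Expected header layout (as parsed below): 4-byte magic b'DAT\x00',
    # then six little-endian uint32 fields: file count and the offsets of
    # the file, extension, name, size and hash-map tables.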
Magic = fp.read(4)
if list(Magic) == [68, 65, 84, 0]:
FileCount = little_endian_to_int(fp.read(4))
FileTableOffset = little_endian_to_int(fp.read(4))
ExtensionTableOffset = little_endian_to_int(fp.read(4))
NameTableOffset = little_endian_to_int(fp.read(4))
SizeTableOffset = little_endian_to_int(fp.read(4))
hashMapOffset = little_endian_to_int(fp.read(4))
print(
'''FileCount: %08x
FileTableOffset: %08x
ExtensionTableOffset:%08x
NameTableOffset:%08x
SizeTableOffset:%08x
hashMapOffset:%08x
'''%
(FileCount, FileTableOffset, ExtensionTableOffset,NameTableOffset,SizeTableOffset,hashMapOffset)
)
return (FileCount, FileTableOffset, ExtensionTableOffset,NameTableOffset,SizeTableOffset,hashMapOffset)
else:
print('[-] error magic number detected')
return False
def get_fileinfo(fp, index, FileTableOffset, ExtensionTableOffset, NameTableOffset, SizeTableOffset):
fp.seek(FileTableOffset + index * 4)
FileOffset = little_endian_to_int(fp.read(4))
fp.seek(ExtensionTableOffset + index * 4)
Extension = fp.read(4).decode('utf-8')
fp.seek(SizeTableOffset + index * 4)
Size = little_endian_to_int(fp.read(4))
fp.seek(NameTableOffset)
FilenameAlignment = little_endian_to_int(fp.read(4))
i = 0
while i < index:
if list(fp.read(FilenameAlignment))[FilenameAlignment-1] == 0:
i += 1
Filename = fp.read(256).split(b'\x00')[0].decode('ascii')
print(
'''
FileIndex: %d
Filename: %s
FileOffset: %08x
Size: %08x
Extension: %s'''%
(index,Filename,FileOffset,Size,Extension)
)
return index,Filename,FileOffset,Size,Extension
def extract_file(fp, filename, FileOffset, Size, extract_dir):
create_dir(extract_dir)
fp.seek(FileOffset)
FileContent = fp.read(Size)
outfile = open(extract_dir + '/'+filename,'wb')
print("extracting file %s to %s/%s"%(filename,extract_dir,filename))
outfile.write(FileContent)
outfile.close()
if filename.find('wtp') > -1 and False: # Removed due to not needed anymore when using Blender DTT import.
wtp_fp = open(extract_dir + '/'+filename,"rb")
content = wtp_fp.read(Size)
dds_group = content.split(b'DDS ')
dds_group = dds_group[1:]
for i in range(len(dds_group)):
print("unpacking %s to %s/%s"%(filename,extract_dir ,filename.replace('.wtp','_%d.dds'%i)))
dds_fp = open(extract_dir + '/'+filename.replace('.wtp','_%d.dds'%i), "wb")
dds_fp.write(b'DDS ')
dds_fp.write(dds_group[i])
dds_fp.close()
wtp_fp.close()
#os.remove("%s/%s"%(extract_dir,filename))
print("done")
def get_all_files(path):
pass
def extract_hashes(fp, extract_dir, FileCount, hashMapOffset, fileNamesOffset):
create_dir(extract_dir)
# file_order.metadata
# Filename Size
fp.seek(fileNamesOffset)
fileNameSize = little_endian_to_int(fp.read(4))
# Filenames
fileNames = []
for i in range(FileCount):
fileNames.append(fp.read(fileNameSize))
# Extraction
filename = 'file_order.metadata'
extract_dir_sub = extract_dir + '\\' + filename
outfile = open(extract_dir_sub,'wb')
# Header
outfile.write(struct.pack('<i', FileCount))
outfile.write(struct.pack('<i', fileNameSize))
#Filenames
for fileName in fileNames:
outfile.write(fileName)
outfile.close()
# hash_data.metadata
# Header
fp.seek(hashMapOffset)
preHashShift = to_int(fp.read(4))
bucketOffsetsOffset = to_int(fp.read(4))
hashesOffset = to_int(fp.read(4))
fileIndicesOffset = to_int(fp.read(4))
# Bucket Offsets
fp.seek(hashMapOffset + bucketOffsetsOffset)
bucketOffsets = []
while fp.tell() < (hashMapOffset + hashesOffset):
bucketOffsets.append(to_int(fp.read(2)))
# Hashes
fp.seek(hashMapOffset + hashesOffset)
hashes = []
for i in range(FileCount):
hashes.append(fp.read(4))
# File Indices
fp.seek(hashMapOffset + fileIndicesOffset)
fileIndices = []
for i in range(FileCount):
fileIndices.append(to_int(fp.read(2)))
# Extraction
filename = 'hash_data.metadata'
extract_dir_sub = extract_dir + '\\' + filename
outfile = open(extract_dir_sub,'wb')
# Header
outfile.write(struct.pack('<i', preHashShift))
outfile.write(struct.pack('<i', bucketOffsetsOffset))
outfile.write(struct.pack('<i', hashesOffset))
outfile.write(struct.pack('<i', fileIndicesOffset))
# Bucket Offsets
for i in bucketOffsets:
print(bucketOffsets)
outfile.write(struct.pack('<H', i))
# Hashes
for i in hashes:
outfile.write(i)
# File Indices
for i in fileIndices:
print(i)
outfile.write(struct.pack('<H', i))
outfile.close()
def main(filename, extract_dir, ROOT_DIR):
fp = open(filename,"rb")
headers = read_header(fp)
if headers:
FileCount, FileTableOffset, ExtensionTableOffset,NameTableOffset,SizeTableOffset,hashMapOffset = headers
for i in range(FileCount):
extract_dir_sub = ''
index,Filename,FileOffset,Size,Extension = get_fileinfo(fp, i, FileTableOffset,ExtensionTableOffset, NameTableOffset,SizeTableOffset)
if extract_dir != '':
extract_dir_sub = extract_dir + '\\' + filename.replace(ROOT_DIR ,'')
extract_file(fp, Filename, FileOffset, Size, extract_dir_sub)
extract_hashes(fp, extract_dir, FileCount, hashMapOffset, NameTableOffset)
return Filename
if __name__ == '__main__':
extract_dir = ''
dirname = ''
useage = "\nUseage:\npython dat_unpacker.py your_dat_path your_extract_path"
useage1 = "\nUseage:\nblender --background --python dat_unpacker.py your_dat_path your_extract_path"
if len(sys.argv) < 3:
print(useage)
exit()
if len(sys.argv) > 2:
dir_name = sys.argv[1]
extract_dir = sys.argv[2]
print()
if os.path.split(sys.argv[0])[-1].lower().find("blender") >-1:
if len(sys.argv) < 6:
print(useage1)
exit()
dir_name = sys.argv[4]
extract_dir = sys.argv[5]
if not os.path.exists(extract_dir):
create_dir(extract_dir)
ROOT_DIR = dir_name
for dirpath,dirnames,filename in os.walk(dir_name):
for file in filename:
filename = "%s\%s"%(dirpath,file)
main(filename, extract_dir, ROOT_DIR)
| UTF-8 | Python | false | false | 6,225 | py | 8 | dat_unpacker.py | 6 | 0.704739 | 0.693173 | 0.000161 | 216 | 27.819444 | 136 |
matelukas/workshop | 19,662,360,298,825 | 5377cbb0d581815cd87e3d203f30a2667ff9a3af | ff485c0282ae061fb897b4e8da6d8bc71cadd9c5 | /curtin/maas/curtin/url_helper.py | f3a541833c3a0c6b241ca712647e10ad63f08013 | []
| no_license | https://github.com/matelukas/workshop | 8fbc55bbf108c668380f78b36522e98189af79aa | e50ce0e3cc3f6278526de8e056d94b0e85d9fc40 | refs/heads/master | 2018-02-15T02:22:29.980107 | 2016-10-25T08:34:13 | 2016-10-25T08:34:13 | 67,415,537 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from email.utils import parsedate
import json
import os
import socket
import sys
import time
import uuid
from functools import partial
try:
from urllib import request as _u_re # pylint: disable=no-name-in-module
from urllib import error as _u_e # pylint: disable=no-name-in-module
from urllib.parse import urlparse # pylint: disable=no-name-in-module
urllib_request = _u_re
urllib_error = _u_e
except ImportError:
# python2
import urllib2 as urllib_request
import urllib2 as urllib_error
from urlparse import urlparse # pylint: disable=import-error
from .log import LOG
error = urllib_error
class _ReRaisedException(Exception):
exc = None
"""this exists only as an exception type that was re-raised by
an exception_cb, so code can know to handle it specially"""
def __init__(self, exc):
self.exc = exc
def _geturl(url, headers=None, headers_cb=None, exception_cb=None, data=None):
def_headers = {'User-Agent': 'Curtin/0.1'}
if headers is not None:
def_headers.update(headers)
headers = def_headers
if headers_cb:
headers.update(headers_cb(url))
if data and isinstance(data, dict):
data = json.dumps(data).encode()
try:
req = urllib_request.Request(url=url, data=data, headers=headers)
r = urllib_request.urlopen(req).read()
# python2, we want to return bytes, which is what python3 does
if isinstance(r, str):
return r.decode()
return r
except urllib_error.HTTPError as exc:
myexc = UrlError(exc, code=exc.code, headers=exc.headers, url=url,
reason=exc.reason)
except Exception as exc:
myexc = UrlError(exc, code=None, headers=None, url=url,
reason="unknown")
if exception_cb:
try:
exception_cb(myexc)
except Exception as e:
myexc = _ReRaisedException(e)
raise myexc
def geturl(url, headers=None, headers_cb=None, exception_cb=None,
data=None, retries=None, log=LOG.warn):
"""return the content of the url in binary_type. (py3: bytes, py2: str)"""
if retries is None:
retries = []
curexc = None
for trynum, naptime in enumerate(retries):
try:
return _geturl(url=url, headers=headers, headers_cb=headers_cb,
exception_cb=exception_cb, data=data)
        except _ReRaisedException as e:
            raise e.exc
        except Exception as e:
            curexc = e
        if log:
            msg = ("try %d of request to %s failed. sleeping %d: %s" %
                   (trynum, url, naptime, curexc))
log(msg)
time.sleep(naptime)
try:
return _geturl(url=url, headers=headers, headers_cb=headers_cb,
exception_cb=exception_cb, data=data)
except _ReRaisedException as e:
raise e.exc
class UrlError(IOError):
def __init__(self, cause, code=None, headers=None, url=None, reason=None):
IOError.__init__(self, str(cause))
self.cause = cause
self.code = code
self.headers = headers
if self.headers is None:
self.headers = {}
self.url = url
self.reason = reason
def __str__(self):
if isinstance(self.cause, urllib_error.HTTPError):
msg = "http error: %s" % self.cause.code
elif isinstance(self.cause, urllib_error.URLError):
msg = "url error: %s" % self.cause.reason
elif isinstance(self.cause, socket.timeout):
msg = "socket timeout: %s" % self.cause
else:
msg = "Unknown Exception: %s" % self.cause
return "[%s] " % self.url + msg
class OauthUrlHelper(object):
def __init__(self, consumer_key=None, token_key=None,
token_secret=None, consumer_secret=None,
skew_data_file="/run/oauth_skew.json"):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret or ""
self.token_key = token_key
self.token_secret = token_secret
self.skew_data_file = skew_data_file
self._do_oauth = True
self.skew_change_limit = 5
required = (self.token_key, self.token_secret, self.consumer_key)
if not any(required):
self._do_oauth = False
elif not all(required):
raise ValueError("all or none of token_key, token_secret, or "
"consumer_key can be set")
old = self.read_skew_file()
self.skew_data = old or {}
def __str__(self):
fields = ['consumer_key', 'consumer_secret',
'token_key', 'token_secret']
masked = fields
def r(name):
if not hasattr(self, name):
rval = "_unset"
else:
val = getattr(self, name)
if val is None:
rval = "None"
elif name in masked:
rval = '"%s"' % ("*" * len(val))
else:
rval = '"%s"' % val
return '%s=%s' % (name, rval)
return ("OauthUrlHelper(" + ','.join([r(f) for f in fields]) + ")")
def read_skew_file(self):
if self.skew_data_file and os.path.isfile(self.skew_data_file):
with open(self.skew_data_file, mode="r") as fp:
return json.load(fp)
return None
def update_skew_file(self, host, value):
# this is not atomic
if not self.skew_data_file:
return
cur = self.read_skew_file()
if cur is None:
cur = {}
cur[host] = value
with open(self.skew_data_file, mode="w") as fp:
fp.write(json.dumps(cur))
def exception_cb(self, exception):
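        # On 401/403 responses, estimate the clock skew from the server's
        # "date" header and persist it per host, so later requests can build
        # oauth headers with a corrected timestamp (see headers_cb below).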
if not (isinstance(exception, UrlError) and
(exception.code == 403 or exception.code == 401)):
return
if 'date' not in exception.headers:
LOG.warn("Missing header 'date' in %s response", exception.code)
return
date = exception.headers['date']
try:
remote_time = time.mktime(parsedate(date))
except Exception as e:
LOG.warn("Failed to convert datetime '%s': %s", date, e)
return
skew = int(remote_time - time.time())
host = urlparse(exception.url).netloc
old_skew = self.skew_data.get(host, 0)
if abs(old_skew - skew) > self.skew_change_limit:
self.update_skew_file(host, skew)
LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
self.skew_data[host] = skew
return
def headers_cb(self, url):
if not self._do_oauth:
return {}
host = urlparse(url).netloc
clockskew = None
if self.skew_data and host in self.skew_data:
clockskew = self.skew_data[host]
return oauth_headers(
url=url, consumer_key=self.consumer_key,
token_key=self.token_key, token_secret=self.token_secret,
consumer_secret=self.consumer_secret, clockskew=clockskew)
def _wrapped(self, wrapped_func, args, kwargs):
kwargs['headers_cb'] = partial(
self._headers_cb, kwargs.get('headers_cb'))
kwargs['exception_cb'] = partial(
self._exception_cb, kwargs.get('exception_cb'))
return wrapped_func(*args, **kwargs)
def geturl(self, *args, **kwargs):
return self._wrapped(geturl, args, kwargs)
def _exception_cb(self, extra_exception_cb, exception):
ret = None
try:
if extra_exception_cb:
ret = extra_exception_cb(exception)
finally:
self.exception_cb(exception)
return ret
def _headers_cb(self, extra_headers_cb, url):
headers = {}
if extra_headers_cb:
headers = extra_headers_cb(url)
headers.update(self.headers_cb(url))
return headers
def _oauth_headers_none(url, consumer_key, token_key, token_secret,
consumer_secret, clockskew=0):
"""oauth_headers implementation when no oauth is available"""
if not any([token_key, token_secret, consumer_key]):
return {}
pkg = "'python3-oauthlib'"
if sys.version_info[0] == 2:
pkg = "'python-oauthlib' or 'python-oauth'"
raise ValueError(
"Oauth was necessary but no oauth library is available. "
"Please install package " + pkg + ".")
def _oauth_headers_oauth(url, consumer_key, token_key, token_secret,
consumer_secret, clockskew=0):
"""Build OAuth headers with oauth using given credentials."""
consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
token = oauth.OAuthToken(token_key, token_secret)
if clockskew is None:
clockskew = 0
timestamp = int(time.time()) + clockskew
params = {
'oauth_version': "1.0",
'oauth_nonce': uuid.uuid4().hex,
'oauth_timestamp': timestamp,
'oauth_token': token.key,
'oauth_consumer_key': consumer.key,
}
req = oauth.OAuthRequest(http_url=url, parameters=params)
req.sign_request(
oauth.OAuthSignatureMethod_PLAINTEXT(), consumer, token)
return(req.to_header())
def _oauth_headers_oauthlib(url, consumer_key, token_key, token_secret,
consumer_secret, clockskew=0):
"""Build OAuth headers with oauthlib using given credentials."""
if clockskew is None:
clockskew = 0
timestamp = int(time.time()) + clockskew
client = oauth1.Client(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=token_key,
resource_owner_secret=token_secret,
signature_method=oauth1.SIGNATURE_PLAINTEXT,
timestamp=str(timestamp))
uri, signed_headers, body = client.sign(url)
return signed_headers
oauth_headers = _oauth_headers_none
try:
# prefer to use oauthlib. (python-oauthlib)
import oauthlib.oauth1 as oauth1
oauth_headers = _oauth_headers_oauthlib
except ImportError:
# no oauthlib was present, try using oauth (python-oauth)
try:
import oauth.oauth as oauth
oauth_headers = _oauth_headers_oauth
except ImportError:
# we have no oauth libraries available, use oauth_headers_none
pass
# vi: ts=4 expandtab syntax=python
| UTF-8 | Python | false | false | 10,482 | py | 199 | url_helper.py | 71 | 0.587674 | 0.584526 | 0 | 315 | 32.27619 | 78 |
nyoshimizu/HTSymm | 661,424,998,541 | a5945e34a3a1cbaa04fa905e7bdf80d452ef891c | 9eb1271890591a13d207c59e5b991a29ff5b8780 | /analyze/delaydefpaths.py | 75f532115a495fcc6353b74d8105e0e18b570e5b | []
| no_license | https://github.com/nyoshimizu/HTSymm | 956203b6071a89e4929cb4f5b049b29dfc98fda | ec66ef1fb88fb90af9cfc91bfb70ec421b54ce6e | refs/heads/master | 2021-01-21T04:53:53.697143 | 2016-07-22T23:14:19 | 2016-07-22T23:14:19 | 50,880,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This function determines the delay defining paths for a circuit. It requires a
db.VerilogDB to determine the delay-defining paths and net path delay for a
circuit given a certain input.
"""
from random import (
seed,
randrange
)
class rand_inputs:
"""
A generator for creating random inputs for all input pins.
pins is integer number of input pins in the circuit.
seed_num is integer seed for the random generator.
offset is integer number of PRNG numbers to skip before adding to returned
list of inputs.
Iterator returns rand_return, a string of binary with initial '0b removed
and padded to match number of bits as required by pins input.
Note that PRNG is initialized with seed when object is created, not when
iterator is called.
"""
def __init__(self, pins, seed_num, offset):
self.pins = pins
self.rand_size = 2 ** self.pins
self.seed_num = seed_num
self.offset = offset
seed(seed_num)
self.rand_return = ''
        for k in range(offset):  # skip the first `offset` draws, as described in the class docstring
self.rand_return = randrange(self.rand_size)
def __next__(self):
self.rand_return = randrange(self.rand_size)
self.rand_return = bin(self.rand_return)[2:]
        self.rand_return = self.rand_return.zfill(self.pins)
return self.rand_return
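# Illustrative usage sketch (not part of the original module):
#   gen = rand_inputs(pins=4, seed_num=42, offset=0)
#   vector = next(gen)  # e.g. '1010' -- one pseudo-random input pattern per call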
| UTF-8 | Python | false | false | 1,332 | py | 11 | delaydefpaths.py | 11 | 0.662913 | 0.660661 | 0 | 48 | 26.729167 | 78 |
Firefly8/ATC | 6,287,832,143,303 | 6342f44d3d308256ec99b49e719e4b66a0dc258f | 4b2d225b7a686f61cc51054f00f47af47a42e9f1 | /verification/testing/unit_tests/dp/connection_established_delete_test.py | 40194886f64c62cbddcf5c7094394e7d9d6aa9c8 | []
| no_license | https://github.com/Firefly8/ATC | e18deb1e6d31be1fa6e881dc858c0b61de6fa39a | 35872e9e3cb08688cfdde63053ccf1f43d7f0d36 | refs/heads/master | 2021-09-07T08:45:25.026285 | 2018-02-20T14:59:36 | 2018-02-20T14:59:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys
import os
import inspect
my_currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
my_parentdir = os.path.dirname(my_currentdir)
my_grandparentdir = os.path.dirname(my_parentdir)
sys.path.append(my_grandparentdir)
sys.path.append(my_parentdir)
#from test_infra import *
import random
from common_infra import *
from alvs_infra import *
from unit_tester import Unit_Tester
from alvs_players_factory import *
server_count = 1
client_count = 1
service_count = 1
class Connection_Established_Delete_Test(Unit_Tester):
def user_init(self, setup_num):
print "FUNCTION " + sys._getframe().f_code.co_name + " called"
self.test_resources = ALVS_Players_Factory.generic_init(setup_num, service_count, server_count, client_count)
w = 1
for s in self.test_resources['server_list']:
s.vip = self.test_resources['vip_list'][0]
s.weight = w
def change_config(self, config):
#need to start ezbox
config['start_ezbox'] = True
def run_user_test(self):
port = '80'
sched_algorithm = 'sh'
ezbox = self.test_resources['ezbox']
server_list = self.test_resources['server_list']
client_list = self.test_resources['client_list']
vip_list = self.test_resources['vip_list']
print "service %s is set with %s scheduling algorithm" %(vip_list[0],sched_algorithm)
ezbox.add_service(vip_list[0], port, sched_alg=sched_algorithm, sched_alg_opt='')
ezbox.add_server(server_list[0].vip, port, server_list[0].ip, port)
# create packet
data_packet = tcp_packet(mac_da=ezbox.setup['mac_address'],
mac_sa=client_list[0].mac_address.replace(':',' '),
ip_dst=ip_to_hex_display(vip_list[0]),
ip_src=ip_to_hex_display(client_list[0].ip),
tcp_source_port = '00 00',
tcp_dst_port = '00 50', # port 80
tcp_reset_flag = False,
tcp_fin_flag = False,
packet_length=64)
data_packet.generate_packet()
##########################################################################
# send packet to slow path (connection is not exist)
##########################################################################
# verify that connection is not exist
connection=ezbox.get_connection(ip2int(vip_list[0]), port, ip2int(client_list[0].ip) , 0, 6)
if connection != None:
print "ERROR, exist , fail\n"
exit(1)
# capture all packets on server
server_list[0].capture_packets_from_service(vip_list[0])
# send packet
print "Send Data Packet to Service"
client_list[0].send_packet_to_nps(data_packet.pcap_file_name)
# verify that connection exist
time.sleep(0.5)
connection=ezbox.get_connection(ip2int(vip_list[0]), port, ip2int(client_list[0].ip) , 0, 6)
if connection == None:
print "ERROR, connection was created, even though server is not exist\n"
exit(1)
print "Connection exist"
# check how many packets were captured
packets_received = server_list[0].stop_capture()
if packets_received !=1:
print "ERROR, wrong number of packets were received on server"
exit(1)
# wait 5 minutes
print "Wait 5 minutes and send data packet to this connection again"
time.sleep(60*5)
# verify that connection exist
connection=ezbox.get_connection(ip2int(vip_list[0]), port, ip2int(client_list[0].ip) , 0, 6)
if connection == None:
print "ERROR, connection was created, even though server is not exist\n"
exit(1)
# send data packet again
print "Send Data Packet to Service again"
client_list[0].send_packet_to_nps(data_packet.pcap_file_name)
# wait 5 minutes
print "Wait 16 minutes and check that connection still exist"
for i in range(16):
# verify that connection exist
connection=ezbox.get_connection(ip2int(vip_list[0]), port, ip2int(client_list[0].ip) , 0, 6)
if connection == None:
print "ERROR, connection was deleted..\n"
exit(1)
print "Connection Exist"
time.sleep(60)
time.sleep(20)
# check that connection was deleted
connection=ezbox.get_connection(ip2int(vip_list[0]), port, ip2int(client_list[0].ip) , 0, 6)
if connection != None:
print "ERROR, still exist, connection should be deleted..\n"
exit(1)
print "Connection was deleted after established time"
print
current_test = Connection_Established_Delete_Test()
current_test.main()
| UTF-8 | Python | false | false | 4,377 | py | 189 | connection_established_delete_test.py | 107 | 0.661869 | 0.642678 | 0 | 137 | 30.948905 | 111 |
mcoavoux/test | 18,107,582,132,628 | 292adec912fbb70a01e2ea3b17ac35b69df1ce3b | cdc27a38daee4f73d4c4830f9e20306aedc46676 | /utils/conll2text.py | 42e4fe115da0ec38f17dc6bf364eb371b0b5f21b | []
| no_license | https://github.com/mcoavoux/test | 50e2f084c933c907678287bbb63acdcb4472f968 | 9582b5efa547e2a877e19f191ba64edee1eee469 | refs/heads/master | 2021-04-27T00:27:16.772614 | 2019-10-03T11:33:39 | 2019-10-03T11:33:39 | 123,818,277 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
ID,FORM,LEMMA,CPOS,FPOS,MORPH,HEAD,DEPREL,PHEAD,PDEPREL=range(10)
def read_conll(filename) :
with open(filename) as f :
sentences = [[line.split("\t") for line in sen.split("\n") if line and line[0] != "#"] for sen in f.read().split("\n\n") if sen.strip()]
for i in range(len(sentences)):
sentences[i] = [t for t in sentences[i] if "-" not in t[ID]]
s = sentences[i]
#for tok in s :
#tok[ID] = int(tok[ID])
#tok[HEAD] = int(tok[HEAD])
return sentences
def main():
import argparse
usage = """Converts a COnllU corpus to raw text (1 sentence per line, tokens separated by spaces"""
parser = argparse.ArgumentParser(description = usage, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("input", help="Input (conllu)")
parser.add_argument("output", help="Output")
args = parser.parse_args()
corpus = read_conll(args.input)
out = open(args.output, "w")
for s in corpus:
tokens = [t[FORM] for t in s]
out.write(" ".join(tokens))
out.write("\n")
out.close()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,173 | py | 24 | conll2text.py | 19 | 0.587383 | 0.583973 | 0 | 39 | 29.025641 | 144 |
quidditymaster/thimbles | 1,065,151,908,260 | 1059efd9d104d97461fb97eea7c566058b82b4c1 | f3faf9d29bba85320dedd5cd2b452b0a1efe3c55 | /thimblesgui/__init__.py | 476455ec1c1649db5ea94506ae84076bfb15aaba | [
"MIT"
]
| permissive | https://github.com/quidditymaster/thimbles | 2f764e35aeaa76aa2136e24fe24d171630a1f277 | b122654a012f0eb4f043d1ee757f884707c97615 | refs/heads/master | 2021-01-11T23:26:33.694235 | 2017-05-14T14:32:36 | 2017-05-14T14:32:36 | 78,581,998 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import matplotlib as mpl
try:
from PyQt4 import QtGui
from PyQt4 import QtCore
mpl.use("Qt4Agg")
which_qt = "PyQt4"
except ImportError:
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
mpl.use("Qt5Agg")
which_qt = "PyQt5"
#mpl.rcParams['backend.qt4'] = which_qt
Qt = QtCore.Qt
#import thimbles as tmb
#style_file = os.path.join(tmb.resource_dir, "matplotlibrc")
#style_dict = mpl.rc_params_from_file(style_file)
#mpl.rcParams.update(style_dict)
#plt.style.use(style_file) #works only in later matplotlib versions
from . import selection
from . import active_collections
from thimblesgui.mplwidget import MatplotlibWidget
from thimblesgui.spec_widget import FluxDisplay
#from . import main_window
#from . import models
#from . import views
#import thimblesgui.grouping_editor
| UTF-8 | Python | false | false | 856 | py | 147 | __init__.py | 139 | 0.745327 | 0.733645 | 0 | 33 | 24.909091 | 67 |
MakerSpaceLeiden/AccesSystem | 18,064,632,472,273 | 7c767343429467a49e203a96b96addba35db19cf | b38c5eb548636af44b4c82fc2b57e3103114f2e0 | /Master/checkdb.py | c073b61546dce1f8d35ae2a184e786c0389a5631 | [
"LicenseRef-scancode-other-permissive",
"MIT",
"NTP",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-rsa-md4",
"Beerware",
"RSA-MD",
"HPND-sell-variant",
"Spencer-94",
"LicenseRef-scancode-zeusbench",
"metamail",
"Apache-2.0"
]
| permissive | https://github.com/MakerSpaceLeiden/AccesSystem | e652a343c86ac904de8a1a08adc13bbc3ee6de7d | 5e39572f51fafca71750660fcf5f737670f17d54 | refs/heads/master | 2023-05-27T01:44:57.893580 | 2023-05-16T11:27:48 | 2023-05-16T11:27:48 | 54,308,309 | 4 | 4 | Apache-2.0 | false | 2022-02-21T22:37:34 | 2016-03-20T08:46:08 | 2021-12-16T21:13:13 | 2022-02-21T22:37:33 | 18,697 | 4 | 6 | 1 | C++ | false | false |
import mysql.connector
SQL ="""select active, members_tag.owner_id, first_name,last_name,email,members_tag.id from acl_entitlement
inner join acl_machine on acl_machine.node_machine_name = %s and acl_machine.requires_permit_id = acl_entitlement.permit_id
inner join members_tag on members_tag.tag = %s and acl_entitlement.holder_id = members_tag.owner_id
inner join members_user on members_user.id = members_tag.owner_id"""
SQL2 ="""select active, members_tag.owner_id, first_name,last_name,email, members_tag.id from acl_entitlement
inner join acl_machine on acl_machine.node_machine_name = %s and acl_machine.requires_permit_id = acl_entitlement.permit_id
inner join members_tag on members_tag.tag like %s and acl_entitlement.holder_id = members_tag.owner_id
inner join members_user on members_user.id = members_tag.owner_id"""
SQL3 = """select owner_id, first_name, last_name, email, members_tag.id from members_user, members_tag
where ( members_tag.tag = %s or members_tag.tag = %s ) and members_tag.owner_id = members_user.id """
cnx = mysql.connector.connect(option_files='/usr/local/makerspaceleiden-crm/makerspaceleiden/my.cnf')
cnx.autocommit = True
cursor = cnx.cursor()
for sql in [ SQL, SQL2, SQL3 ]:
print(sql)
cursor.execute(sql, ('byebye','4-225-254-122-220-63-128'))
for line in cursor.fetchall():
print(line)
| UTF-8 | Python | false | false | 1,495 | py | 172 | checkdb.py | 127 | 0.672241 | 0.657525 | 0 | 23 | 63.956522 | 136 |
the-tale/the-tale | 17,145,509,483,099 | 408c3adf4e8002fcc56d34ae4564e98713f4c5f4 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/news/meta_relations.py | 1989a2e42fbf9d6557a990b001a7bb87b13b89e9 | [
"BSD-3-Clause"
]
| permissive | https://github.com/the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | false | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | 2023-02-06T14:06:56 | 2023-02-15T18:57:29 | 43,549 | 275 | 56 | 329 | Python | false | false |
import smart_imports
smart_imports.all()
class News(meta_relations_objects.MetaType):
__slots__ = ('caption', )
TYPE = 12
TYPE_CAPTION = 'Новость'
def __init__(self, caption, **kwargs):
super(News, self).__init__(**kwargs)
self.caption = caption
@property
def url(self):
return utils_urls.url('news:show', self.id)
@classmethod
def create_from_object(cls, news):
return cls(id=news.id, caption=news.caption)
@classmethod
def create_from_id(cls, id):
try:
news = models.News.objects.get(id=id)
        except models.News.DoesNotExist:
raise meta_relations_exceptions.ObjectsNotFound(type=cls.TYPE, ids=[id])
return cls.create_from_object(news)
@classmethod
def create_from_ids(cls, ids):
news = models.News.objects.filter(id__in=ids)
if len(ids) != len(news):
raise meta_relations_exceptions.ObjectsNotFound(type=cls.TYPE, ids=ids)
return [cls.create_from_object(record) for record in news]
| UTF-8 | Python | false | false | 1,066 | py | 1,898 | meta_relations.py | 1,502 | 0.623229 | 0.621341 | 0 | 39 | 26.128205 | 84 |
thenetcircle/dino | 18,459,769,456,106 | f03ca08c6a6b5ac4da8d9532bd1db796585490d1 | 2b7180b739df298195e35a71e20a4251f83b4813 | /test/validation/test_request_message_integration.py | 3973358dedc9617740ff03b75685e7a6488c19dd | [
"Apache-2.0"
]
| permissive | https://github.com/thenetcircle/dino | 625f752046502a04ab9ec42b0a8c437d7123bcbb | f1f68954191f64cdec4b3914caf154300ccbf519 | refs/heads/master | 2023-08-10T09:59:07.064141 | 2023-08-03T07:56:19 | 2023-08-03T07:56:19 | 69,937,941 | 153 | 21 | Apache-2.0 | false | 2023-02-15T22:53:29 | 2016-10-04T05:40:09 | 2023-02-11T21:32:00 | 2023-02-15T22:53:27 | 66,421 | 141 | 6 | 11 | Python | false | false |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4 as uuid
from activitystreams import parse as as_parser
from dino.config import ApiActions
from dino.validation import request
from test.base import BaseTest
class RequestMessageIntegrationTest(BaseTest):
def test_send_message(self):
self.create_and_join_room()
response_data = request.on_message(as_parser(self.activity_for_message()))
self.assertEqual(True, response_data[0])
def test_send_message_without_actor_id(self):
self.create_and_join_room()
activity = self.activity_for_message()
del activity['actor']['id']
response_data = request.on_message(as_parser(activity))
self.assertEqual(False, response_data[0])
def test_send_message_without_target_id(self):
self.create_and_join_room()
activity = self.activity_for_message()
del activity['target']['id']
response_data = request.on_message(as_parser(activity))
self.assertEqual(False, response_data[0])
def test_send_message_without_being_in_room(self):
new_room_id = str(uuid())
self.create_room(room_id=new_room_id)
activity = self.activity_for_message()
activity['target']['objectType'] = 'room'
activity['target']['id'] = new_room_id
response_data = request.on_message(as_parser(activity))
self.assertEqual(False, response_data[0])
def test_send_message_non_existing_room(self):
new_room_id = str(uuid())
activity = self.activity_for_message()
activity['target']['objectType'] = 'room'
activity['target']['id'] = new_room_id
response_data = request.on_message(as_parser(activity))
self.assertEqual(False, response_data[0])
def test_send_cross_group(self):
new_room_id = str(uuid())
self.create_and_join_room()
self.create_channel_and_room(room_id=new_room_id, room_name='asdf')
self.remove_owner()
self.remove_owner_channel()
self.set_acl({ApiActions.CROSSROOM: {'samechannel': ''}}, room_id=new_room_id)
activity = self.activity_for_message()
activity['target']['objectType'] = 'room'
activity['target']['id'] = new_room_id
response_data = request.on_message(as_parser(activity))
self.assertEqual(True, response_data[0])
def test_send_cross_group_not_allowed(self):
new_room_id = str(uuid())
self.create_and_join_room()
self.create_channel_and_room(room_id=new_room_id, room_name='asdf')
self.remove_owner()
self.remove_owner_channel()
self.set_acl({ApiActions.CROSSROOM: {'disallow': ''}}, room_id=new_room_id)
activity = self.activity_for_message()
activity['target']['objectType'] = 'room'
activity['target']['id'] = new_room_id
activity['actor']['url'] = BaseTest.ROOM_ID
response_data = request.on_message(as_parser(activity))
self.assertEqual(False, response_data[0])
| UTF-8 | Python | false | false | 3,532 | py | 261 | test_request_message_integration.py | 217 | 0.660532 | 0.657135 | 0 | 89 | 38.685393 | 86 |
caspar/PhysicsLab | 7,387,343,787,960 | b67f48a679e78f7b58db5cc0dce24d1ae949f6a9 | 5fec9c2b93f62355727d1dd38145fa25fcd026a1 | /21_Midterm/blackbody.py | 049f3e789737f6f10d9e801f2d0db4a49de90dd3 | [
"MIT"
]
| permissive | https://github.com/caspar/PhysicsLab | 14a43a6e3e000d850dee18daae56a72f762766a7 | 1b4d45d9e915a84ecb80a39498850463bbc2d3be | refs/heads/master | 2021-01-10T13:29:15.332493 | 2016-05-09T00:22:28 | 2016-05-09T00:22:28 | 43,075,107 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import numpy as np
import matplotlib.pyplot as plt
# constants
h = 6.62607004e-34
hbar = h/(2*math.pi)
k_B = 1.38064852e-23
T = 293 # room temperature
c = 299792458
# range of values
wavelength = np.arange(1e-7, 5e-5, 1e-7)  # start just above zero to avoid a divide-by-zero at the first sample
# spectral radiance
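# evaluated below as S(lam) = (8*pi*hbar*c / lam**5) / (exp(h*c/(lam*k_B*T)) - 1), element-wise over the wavelength grid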
S = (8*math.pi*hbar*c /(wavelength**5)) / (math.e**(h*c/(wavelength*k_B*T))-1)
# label axes
plt.title("Radiance of a Blackbody")
plt.xlabel("Wavelength (m)")
plt.ylabel("Spectral Radiance (W/sr)")
plt.plot(wavelength, S)
plt.show()
| UTF-8 | Python | false | false | 500 | py | 102 | blackbody.py | 18 | 0.676 | 0.59 | 0 | 24 | 19.833333 | 78 |
jalondono/holbertonschool-machine_learning | 14,285,061,244,037 | b36d1e8d7b8e2d59e5d7e341b63544eb809c0970 | 17ef57d9f955dd7f85207ac4b5692751b651be3b | /supervised_learning/0x00-binary_classification/5-neuron.py | 12fb785ec0310c82d2076f9980655a1b2821c778 | []
| no_license | https://github.com/jalondono/holbertonschool-machine_learning | 4a51698c371856523f1a06062b5c608d32cb81fb | f83a60babb1d2a510a4a0e0f58aa3880fd9f93a7 | refs/heads/master | 2023-01-21T16:05:55.003248 | 2020-11-23T21:34:56 | 2020-11-23T21:34:56 | 255,702,357 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
""" Neuron """
import numpy as np
def sigmoid(Z):
"""Activation function of sigmoid neurone"""
sigma = (1.0 / (1.0 + np.exp(-Z)))
return sigma
class Neuron:
"""Neuron Class"""
def __init__(self, nx):
"""
constructor method
nx is the number of input features to the neuron
"""
if not isinstance(nx, int):
raise TypeError('nx must be an integer')
if nx < 1:
raise ValueError('nx must be a positive integer')
"""
W = The weights vector for the neuron. Upon instantiation
using a random normal distribution.
"""
self.__W = np.random.normal(0, 1, (1, nx))
"""The bias for the neuron. Upon instantiation,
it should be initialized to 0."""
self.__b = 0
"""The activated output of the neuron (prediction).
Upon instantiation, it should be initialized to 0."""
self.__A = 0
@property
def W(self):
"""Private instance of W"""
return self.__W
@property
def b(self):
"""Private instance of b"""
return self.__b
@property
def A(self):
"""Private instance of A"""
return self.__A
def forward_prop(self, X):
"""
Calculates the forward propagation of the neuron
:param X: is a numpy.ndarray with shape (nx, m)
that contains the input data
:return: The private attribute __A
"""
Z = np.matmul(self.__W, X) + self.__b
self.__A = sigmoid(Z)
return self.__A
def cost(self, Y, A):
"""
Calculates the cost of the model using logistic regression
:param Y: Contains the correct labels for the input data
:param A: containing the activated output of the neuron for each
:return: Cost
"""
m = Y.shape[1]
cost = -np.sum((Y * np.log(A)) +
((1 - Y) *
np.log(1.0000001 - A))) / m
return cost
def evaluate(self, X, Y):
"""
Evaluates the neuron’s predictions
:param X: that contains the input data
:param Y: contains the correct labels for the input data
:return: the neuron’s prediction and the cost of the network
"""
A = self.forward_prop(X)
cost = self.cost(Y, A)
Y_predict = np.where(A >= 0.5, 1, 0)
return Y_predict, cost
def gradient_descent(self, X, Y, A, alpha=0.05):
"""
Calculates one pass of gradient descent on the neuron
:param X: contains the input data
:param Y: contains the correct labels for the input data
:param A: containing the activated output of the neuron
for each example
:param alpha: is the learning rate
:return: Just Updates the private attributes __W and __b
"""
dz = A - Y
dw = np.matmul(X, dz.T) / dz.shape[1]
db = np.sum(dz) / dz.shape[1]
self.__W = self.__W - (alpha * dw.T)
self.__b = self.__b - (alpha * db)
| UTF-8 | Python | false | false | 3,111 | py | 222 | 5-neuron.py | 188 | 0.542002 | 0.531703 | 0 | 105 | 28.590476 | 72 |
Miguelflj/Prog1 | 4,793,183,521,084 | c10fb30035acca1a40cd54481e0deccefbb66804 | 4c29ce2483165a702cf8f78435b0581ac5290f02 | /CodesURI/ex1164.py | 1d7420e5f82734ea1c9e1182fdb03237db405a2d | []
| no_license | https://github.com/Miguelflj/Prog1 | eb2955f84e59f82482b722b4cccaac3bb9fa29d9 | 26bf67dd15a2e29d88831a9b22847ea70c823d21 | refs/heads/master | 2020-05-09T12:31:15.475454 | 2019-04-13T04:09:59 | 2019-04-13T04:09:59 | 181,114,023 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def main():
i = 0
n = int(input())
while(i < n):
j = 1
soma = 0
teste = int(input())
while( j < teste):
if( (teste%j) == 0):
soma = soma + j
j += 1
if(teste == soma):
print str(teste) + " eh perfeito"
else:
print str(teste) + " nao eh perfeito"
i += 1
main()
| UTF-8 | Python | false | false | 301 | py | 167 | ex1164.py | 163 | 0.48505 | 0.465116 | 0 | 23 | 12 | 40 |
stevenjj/nstrf_2017 | 8,950,711,878,645 | c9671d8c5434cb9087ce0a66e1b565ca70d69e71 | b2a56075ece16a8a558a47ea65b7a80d66cfb283 | /val_logic_manager/src/button_gui/GUI_val_logic.py | 740049ece1bd6be7a3a9b19b19a111818f116b92 | []
| no_license | https://github.com/stevenjj/nstrf_2017 | 5b7a329d0b9c8d988bba9097cfcd2cb33b3e8f3d | f05057c9e7272e76b432a9c1a2401ad6b2881647 | refs/heads/master | 2020-04-05T14:12:23.089866 | 2017-09-07T00:20:27 | 2017-09-07T00:20:27 | 94,827,917 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Val simple GUI
# Based on Andrea Thomaz's lab's class code in UT Austin
# Modified for personal use.
# stevenjj@utexas.edu
#from GUI_params import *
# GUI Command list
GUI_CMD_TOPIC = 'val_logic_manager/operator_command'
INVALID_CMD = "invalid_cmd"
GO_HOME, GO_HOME_GUI_STRING = "go_home", "Go Neutral Pos"
SEND_SINGLE_IK, SEND_SINGLE_IK_GUI_STRING = "send_single_ik", "Send Single IK"
RE_INIT_MARKERS, RE_INIT_MARKERS_GUI_STRING = "re_init_markers", "Re Initialize Markers"
RUN_GRASPLOC, RUN_GRASPLOC_GUI_STRING = "run_grasploc", "Run Grasploc"
GET_NEAREST_GRASP_IK, GET_NEAREST_GRASP_IK_GUI_STRING = "get_nearest_grasp_ik", "Get Nearest Grasp IK"
TRY_NEXT_GRASP_IK, TRY_NEXT_GRASP_IK_GUI_STRING = "try_next_grasp_ik", "Try IK for Next Grasp"
USE_RIGHT_HAND, USE_RIGHT_HAND_GUI_STRING = "use_right_hand", "Use Right Hand"
USE_LEFT_HAND, USE_LEFT_HAND_GUI_STRING = "use_left_hand", "Use Left Hand"
STOP_ALL_TRAJECTORIES, STOP_ALL_TRAJECTORIES_GUI_STRING = "stop_all_trajectories", "Stop All Trajectories"
# ----- Start ------
import signal
import sys
import rospy
#import rospkg
import yaml
from std_msgs.msg import String
from std_msgs.msg import Int8
from PyQt4.QtCore import QTimer
from PyQt4 import QtGui, QtCore
class ValGui(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
newFont = QtGui.QFont("Helvetica", 14, QtGui.QFont.Bold)
# Add a main layout
mainLayout = QtGui.QVBoxLayout(self)
#mainLayout->setMeubBar(menuBar)
# Add buttons with the commands
grid = QtGui.QGridLayout()
grid.setSpacing(3)
# Initialize rosnode
rospy.init_node("steven_simple_val_gui")
#rospack = rospkg.RosPack()
default_pub_topic = GUI_CMD_TOPIC
# Set Commands
self.commands = [GO_HOME_GUI_STRING,
SEND_SINGLE_IK_GUI_STRING,
RE_INIT_MARKERS_GUI_STRING,
RUN_GRASPLOC_GUI_STRING,
GET_NEAREST_GRASP_IK_GUI_STRING,
TRY_NEXT_GRASP_IK_GUI_STRING,
USE_RIGHT_HAND_GUI_STRING,
USE_LEFT_HAND_GUI_STRING,
STOP_ALL_TRAJECTORIES_GUI_STRING
]
positions = [(i,j) for i in range(len(self.commands)) for j in range(3)]
for position, name in zip(positions, self.commands):
button = QtGui.QPushButton(name)
button.setObjectName('%s' % name)
button.setFont(newFont)
button.setStyleSheet("background-color: #FFA500")
button.clicked.connect(self.handleButton)
grid.addWidget(button, *position)
mainLayout.addLayout(grid)
mainLayout.addStretch()
# Show the GUI
self.adjustSize()
self.setWindowTitle("GUI Val Logic")
self.move(400,100)
self.show()
self.raise_()
# # Create the publisher to publish the commands to
self.pub = rospy.Publisher(default_pub_topic, String, queue_size=1)
rospy.loginfo("Finished initializing GUI Val Logic")
# Button handler after its clicked
def handleButton(self):
clicked_button = self.sender()
string_cmd = INVALID_CMD
send_command = False
# # Publish everytime a command is selected from the combo box
command = str(clicked_button.objectName())
if command in self.commands:
send_command = True
if command == GO_HOME_GUI_STRING:
string_cmd = GO_HOME
elif command == SEND_SINGLE_IK_GUI_STRING:
string_cmd = SEND_SINGLE_IK
elif command == RE_INIT_MARKERS_GUI_STRING:
string_cmd = RE_INIT_MARKERS
elif command == RUN_GRASPLOC_GUI_STRING:
string_cmd = RUN_GRASPLOC
elif command == GET_NEAREST_GRASP_IK_GUI_STRING:
string_cmd = GET_NEAREST_GRASP_IK
elif command == TRY_NEXT_GRASP_IK_GUI_STRING:
string_cmd = TRY_NEXT_GRASP_IK
elif command == USE_RIGHT_HAND_GUI_STRING:
string_cmd = USE_RIGHT_HAND
elif command == USE_LEFT_HAND_GUI_STRING:
string_cmd = USE_LEFT_HAND
elif command == STOP_ALL_TRAJECTORIES_GUI_STRING:
string_cmd = STOP_ALL_TRAJECTORIES
else:
string_cmd = INVALID_CMD
rospy.loginfo(command)
if send_command:
msg = String()
msg.data = string_cmd
self.pub.publish(msg)
# def gui_start():
# app = QtGui.QApplication(sys.argv)
# sg = ValGui()
# sys.exit(app.exec_())
def sigint_handler(*args):
"""Handler for the SIGINT signal."""
sys.stderr.write('\r')
QtGui.QApplication.quit()
if __name__ == "__main__":
#gui_start
signal.signal(signal.SIGINT, sigint_handler)
app = QtGui.QApplication(sys.argv)
timer = QTimer()
timer.start(100) # You may change this if you wish.
timer.timeout.connect(lambda: None) # Let the interpreter run each 100 ms.
sg = ValGui()
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 5,136 | py | 75 | GUI_val_logic.py | 37 | 0.622079 | 0.617601 | 0 | 158 | 31.5 | 106 |
niel99/all2 | 5,214,090,320,482 | 9857cfd2abd936c8a83f3fba1dbb42967527b0b0 | 5c1beb1bcea9c355e70642db901d42e828c46e6a | /MUFFINS3.py | cd058ebe4251066f0ddc255815afb819f6fcd07a | []
| no_license | https://github.com/niel99/all2 | 744302f54a80bacc885d6d65d2101a486fc8e9b5 | 8104715203de47087f7632ac3855fd7966ef2be2 | refs/heads/master | 2020-12-02T06:28:37.525192 | 2017-07-11T02:27:20 | 2017-07-11T02:27:20 | 96,841,071 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def leftover(n):
if n%2==0:
l=n/2+1
else:
l=(n+1)/2
l=int(l)
print(l,"\n")
t=int(input())
while t>0:
n=int(input())
leftover(n)
t=t-1
| UTF-8 | Python | false | false | 178 | py | 56 | MUFFINS3.py | 44 | 0.438202 | 0.393258 | 0 | 12 | 13.833333 | 18 |
emmett-framework/rest | 6,588,479,845,652 | 0772c91318d18115feb5c6f176ca663cd3e316a8 | 6d24838061f45704bf612c92e99255c247ffec22 | /emmett_rest/openapi/generation.py | c98e1bdf340661efdb307d1d7da96cd934c84f2c | [
"BSD-3-Clause"
]
| permissive | https://github.com/emmett-framework/rest | a9dbaad0259b9c26b315a81a66a74337a5cab819 | d2c7220889e2031e2aa7212d89de8a5e7184d6ba | refs/heads/master | 2023-05-01T16:42:34.349006 | 2023-04-21T08:02:43 | 2023-04-21T08:02:43 | 234,587,909 | 10 | 1 | BSD-3-Clause | false | 2022-04-11T10:12:12 | 2020-01-17T16:21:43 | 2022-02-14T09:26:17 | 2022-04-11T10:12:12 | 302 | 4 | 1 | 0 | Python | false | false |
# -*- coding: utf-8 -*-
"""
emmett_rest.openapi.generation
------------------------------
Provides OpenAPI generation functions
:copyright: 2017 Giovanni Barillari
:license: BSD-3-Clause
"""
import datetime
import decimal
import re
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union, get_type_hints
from pydantic import BaseModel, Field, create_model
from pydantic.config import BaseConfig
from pydantic.fields import FieldInfo, ModelField
from pydantic.schema import field_schema, model_process_schema
from ..rest import RESTModule
from ..serializers import Serializer
from ..parsers import Parser
from .schemas import OpenAPI
REF_PREFIX = "#/components/schemas/"
_re_path_param = re.compile(r"<(\w+)\:(\w+)>")
_re_path_param_optional = re.compile(r"\(([^<]+)?<(\w+)\:(\w+)>\)\?")
_pydantic_baseconf = BaseConfig()
_path_param_types_map = {
"alpha": str,
"any": str,
"date": str,
"float": float,
"int": int,
"str": str
}
_model_field_types_map = {
"id": int,
"string": str,
"text": str,
"blob": str,
"bool": bool,
"int": int,
"float": float,
"decimal": decimal.Decimal,
"date": datetime.date,
"datetime": datetime.datetime,
"time": datetime.time,
"json": Dict[str, Any],
"jsonb": Dict[str, Any],
"geography": str,
"geometry": str,
"password": str,
"upload": str,
"list:string": List[str],
"list:int": List[int]
}
_def_summaries = {
"index": "List {entity}",
"read": "Retrieve {entity}",
"create": "Create {entity}",
"update": "Update {entity}",
"delete": "Delete {entity}",
"sample": "List random {entity}",
"group": "Group {entity}",
"stats": "Retrieve {entity} stats"
}
_def_descriptions = {
"index": "Returns a list of {entity}",
"read": "Retrieves specific {entity} with the given identifier",
"create": "Creates new {entity} with the given specs",
"update": "Updates specific {entity} with the given identifier and specs",
"delete": "Delete specific {entity} with the given identifier",
"sample": "Returns random selection of {entity}",
"group": "Counts {entity} grouped by the given attribute",
"stats": "Returns {entity} stats for the specified attributes"
}
class MetaModel(BaseModel):
object: str = "list"
has_more: bool = False
total_objects: int = 0
class ErrorsModel(BaseModel):
errors: Dict[str, Any] = Field(default_factory=dict)
class GroupModel(BaseModel):
value: Any
count: int
class StatModel(BaseModel):
min: Union[int, float]
max: Union[int, float]
avg: Union[int, float]
_error_schema = model_process_schema(
ErrorsModel,
model_name_map={},
ref_prefix=None
)[0]
_def_errors = {
"400": {
"description": "Bad request",
"content": {
"application/json": {
"schema": _error_schema
}
}
},
"404": {
"description": "Resource not found",
"content": {
"application/json": {
"schema": _error_schema
}
}
},
"422": {
"description": "Unprocessable request",
"content": {
"application/json": {
"schema": _error_schema
}
}
}
}
def _defs_from_item(obj: Any, key: str):
rv = defaultdict(list)
try:
if issubclass(obj, BaseModel):
rv[obj].append(key)
rv.update(_defs_from_pydantic_model(obj, parent=key))
elif issubclass(obj, Enum):
rv[obj].append(key)
except:
pass
return rv
def _defs_from_pydantic_model(obj: Type[BaseModel], parent: Optional[str] = None):
rv = defaultdict(list)
for key, field in obj.__fields__.items():
parent_key = f"{parent}.{key}" if parent else key
for ikey, ival in _defs_from_item(field.type_, parent_key).items():
rv[ikey].extend(ival)
return rv
def _denormalize_schema(schema: Dict[str, Any], defs: Dict[str, Dict[str, Any]]):
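    # Inline "$ref" pointers found at the top level, in object properties,
    # in array items and in anyOf members, using the definitions in `defs`.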
obj_type = schema.get("type")
if "$ref" in schema:
schema.update(defs[schema.pop("$ref")[14:]])
elif obj_type == "object":
for key, value in list(schema.get("properties", {}).items()):
if "$ref" in value:
schema["properties"][key] = defs[value["$ref"][14:]]
elif obj_type == "array":
if "$ref" in schema.get("items", {}):
schema["items"] = defs[schema["items"]["$ref"][14:]]
elif "anyOf" in schema:
for idx, element in list(enumerate(schema["anyOf"])):
if "$ref" in element:
schema["anyOf"][idx] = defs[element["$ref"][14:]]
def _index_default_query_parameters(
module: RESTModule,
sort_enabled: bool = True
) -> List[Dict[str, Any]]:
rv = []
model_map = {}
enums = {module.ext.config.sort_param: []}
for field in module.allowed_sorts:
enums[module.ext.config.sort_param].extend([field, f"-{field}"])
condition_fields = {key: (Any, None) for key in module.query_allowed_fields}
fields = [
ModelField(
name=module.ext.config.page_param,
type_=int,
class_validators=None,
model_config=_pydantic_baseconf,
required=False,
default=1,
field_info=FieldInfo(ge=1)
),
ModelField(
name=module.ext.config.pagesize_param,
type_=int,
class_validators=None,
model_config=_pydantic_baseconf,
required=False,
default=module.ext.config.default_pagesize,
field_info=FieldInfo(
description="Size of the page",
ge=module.ext.config.min_pagesize,
le=module.ext.config.max_pagesize
)
)
]
if sort_enabled:
fields.append(
ModelField(
name=module.ext.config.sort_param,
type_=List[str],
class_validators=None,
model_config=_pydantic_baseconf,
required=False,
default=module.default_sort,
field_info=FieldInfo(
description=(
"Sort results using the specified attribute(s). "
"Descendant sorting applied with -{parameter} notation. "
"Multiple values should be separated by comma."
)
)
)
)
if condition_fields:
where_model = create_model('Condition', **condition_fields)
fields.append(
ModelField(
name=module.ext.config.query_param,
type_=where_model,
class_validators=None,
model_config=_pydantic_baseconf,
required=False,
field_info=FieldInfo(
description=(
"Filter results using the provided query object."
)
)
)
)
model_map[where_model] = 'Condition'
for field in fields:
schema, defs, _ = field_schema(
field, model_name_map=model_map, ref_prefix=None
)
if field.name in enums:
schema["items"]["enum"] = enums[field.name]
elif field.name == module.ext.config.query_param:
schema["allOf"][0] = defs["Condition"]
rv.append({
"name": field.name,
"in": "query",
"required": field.required,
"schema": schema
})
return rv
def _stats_default_query_parameters(module: RESTModule) -> List[Dict[str, Any]]:
rv = []
model_map = {}
condition_fields = {key: (Any, None) for key in module.query_allowed_fields}
fields = [
ModelField(
name='fields',
type_=List[str],
class_validators=None,
model_config=_pydantic_baseconf,
required=True,
field_info=FieldInfo(
description=(
"Add specified attribute(s) to stats. "
"Multiple values should be separated by comma."
)
)
)
]
if condition_fields:
where_model = create_model('Condition', **condition_fields)
fields.append(
ModelField(
name=module.ext.config.query_param,
type_=where_model,
class_validators=None,
model_config=_pydantic_baseconf,
required=False,
field_info=FieldInfo(
description=(
"Filter results using the provided query object."
)
)
)
)
model_map[where_model] = 'Condition'
for field in fields:
schema, defs, _ = field_schema(
field, model_name_map=model_map, ref_prefix=None
)
if field.name == "fields":
schema["items"]["enum"] = module.stats_allowed_fields
if field.name == module.ext.config.query_param:
schema["allOf"][0] = defs["Condition"]
rv.append({
"name": field.name,
"in": "query",
"required": field.required,
"schema": schema
})
return rv
def build_schema_from_fields(
module: RESTModule,
fields: Dict[str, Any],
hints_check: Optional[Set[str]] = None
) -> Tuple[Dict[str, Any], Type[BaseModel]]:
hints_check = hints_check if hints_check is not None else set(fields.keys())
schema_fields, hints_defs, fields_choices = {}, defaultdict(list), {}
for key, defdata in fields.items():
choices = None
if isinstance(defdata, (list, tuple)):
if len(defdata) == 3:
type_hint, type_default, choices = defdata
else:
type_hint, type_default = defdata
else:
type_hint = defdata
type_default = ...
schema_fields[key] = (type_hint, type_default)
if choices:
fields_choices[key] = choices
for key in set(schema_fields.keys()) & hints_check:
for type_arg in [schema_fields[key][0]] + list(getattr(
schema_fields[key][0], "__args__", []
)):
for ikey, ival in _defs_from_item(type_arg, key).items():
hints_defs[ikey].extend(ival)
model = create_model(module.model.__name__, **schema_fields)
schema, defs, nested = model_process_schema(
model,
model_name_map={key: key.__name__ for key in hints_defs.keys()},
ref_prefix=None
)
for def_schema in defs.values():
_denormalize_schema(def_schema, defs)
for key, value in schema["properties"].items():
_denormalize_schema(value, defs)
for key, choices in fields_choices.items():
schema["properties"][key]["enum"] = choices
return schema, model
class OpenAPIGenerator:
def __init__(
self,
title: str,
version: str,
openapi_version: str = "3.0.2",
description: Optional[str] = None,
modules: List[RESTModule] = [],
modules_tags: Dict[str, str] = {},
tags: Optional[List[Dict[str, Any]]] = None,
servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
terms_of_service: Optional[str] = None,
contact: Optional[Dict[str, Union[str, Any]]] = None,
license_info: Optional[Dict[str, Union[str, Any]]] = None,
security_schemes: Optional[Dict[str, Any]] = None,
):
self.openapi_version = openapi_version
self.info: Dict[str, Any] = {"title": title, "version": version}
if description:
self.info["description"] = description
if terms_of_service:
self.info["termsOfService"] = terms_of_service
if contact:
self.info["contact"] = contact
if license_info:
self.info["license"] = license_info
self.modules = modules
self.modules_tags = modules_tags
self.tags = tags or []
self.servers = servers or []
self.security_schemes = security_schemes or {}
def fields_from_model(
self,
model: Any,
model_fields: Dict[str, Any],
fields: List[str]
) -> Dict[str, Tuple[Type, Any, List[Any]]]:
rv = {}
for key in fields:
field = model_fields[key]
ftype = field._type
choices = None
if ftype.startswith("decimal"):
ftype = "decimal"
if ftype.startswith("reference"):
ftype = model._belongs_ref_[key].ftype
if "in" in field._validation and ftype != "bool":
if isinstance(field._validation["in"], (list, tuple)):
choices = list(field._validation["in"])
rv[key] = (
_model_field_types_map.get(ftype, Any),
Field(default_factory=model_fields[key].default)
if callable(model_fields[key].default) else model_fields[key].default,
choices
)
return rv
def build_schema_from_parser(
self,
module: RESTModule,
parser: Parser,
model_fields: Optional[Dict[str, Any]] = None
) -> Tuple[Dict[str, Any], Type[BaseModel]]:
model_fields = model_fields or {
key: module.model.table[key]
for key in module.model._instance_()._fieldset_all
}
fields, hints_check = self.fields_from_model(
module.model, model_fields, parser.attributes
), set()
for key, defdata in getattr(parser, '_openapi_def_fields', {}).items():
if isinstance(defdata, (list, tuple)):
type_hint, type_default = defdata
else:
type_hint = defdata
type_default = ...
fields[key] = (type_hint, type_default)
hints_check.add(key)
return build_schema_from_fields(module, fields, hints_check)
def build_schema_from_serializer(
self,
module: RESTModule,
serializer: Serializer,
model_fields: Optional[Dict[str, Any]] = None
) -> Tuple[Dict[str, Any], Type[BaseModel]]:
model_fields = model_fields or {
key: module.model.table[key]
for key in module.model._instance_()._fieldset_all
}
fields, hints_check = self.fields_from_model(
module.model, model_fields, serializer.attributes
), set()
for key in serializer._attrs_override_:
type_hint = get_type_hints(getattr(serializer, key)).get('return', Any)
type_hint_opt = False
for type_arg in getattr(type_hint, "__args__", []):
if issubclass(type_arg, type(None)):
type_hint_opt = True
fields[key] = (type_hint, None if type_hint_opt else ...)
hints_check.add(key)
for key, defdata in getattr(serializer, '_openapi_def_fields', {}).items():
if isinstance(defdata, (list, tuple)):
type_hint, type_default = defdata
else:
type_hint = defdata
type_default = ...
fields[key] = (type_hint, type_default)
hints_check.add(key)
return build_schema_from_fields(module, fields, hints_check)
def build_definitions(self, module: RESTModule) -> Dict[str, Any]:
serializers, parsers = {}, {}
model_fields = {
key: module.model.table[key]
for key in module.model._instance_()._fieldset_all
}
for serializer_name, serializer in {
"__default__": module.serializer,
**module._openapi_specs["serializers"]
}.items():
if serializer in serializers:
continue
data = serializers[serializer] = {}
serializer_schema, serializer_model = self.build_schema_from_serializer(
module, serializer, model_fields
)
data.update(
name=serializer_name,
model=serializer_model,
schema=serializer_schema
)
for parser_name, parser in {
"__default__": module.parser,
**module._openapi_specs["parsers"]
}.items():
if parser in parsers:
continue
data = parsers[parser] = {}
parser_schema, parser_model = self.build_schema_from_parser(
module, parser, model_fields
)
data.update(
name=parser_name,
model=parser_model,
schema=parser_schema
)
return {
"module": module.name,
"model": module.model.__name__,
"serializers": serializers,
"parsers": parsers,
"schema": serializers[module.serializer]["schema"]
}
def build_operation_metadata(
self,
module: RESTModule,
modules_tags: Dict[str, str],
route_kind: str,
method: str
) -> Dict[str, Any]:
entity_name = (
module._openapi_specs.get("entity_name") or
module.name.rsplit(".", 1)[-1]
)
return {
"summary": _def_summaries[route_kind].format(entity=entity_name),
"description": _def_descriptions[route_kind].format(entity=entity_name),
"operationId": f"{module.name}.{route_kind}.{method}".replace(".", "_"),
"tags": [modules_tags[module.name]]
}
def build_operation_parameters(
self,
module: RESTModule,
path_kind: str,
path_params: Dict[str, Dict[str, Any]]
) -> List[Dict[str, Any]]:
rv = []
for pname, pdata in path_params.items():
rv.append({
"name": pname,
"in": "path",
"required": not pdata["optional"],
"schema": field_schema(
ModelField(
name=pname,
type_=_path_param_types_map[pdata["type"]],
class_validators=None,
model_config=_pydantic_baseconf,
required=not pdata["optional"]
),
model_name_map={},
ref_prefix=REF_PREFIX
)[0]
})
if path_kind == "index":
rv.extend(_index_default_query_parameters(module))
elif path_kind == "sample":
rv.extend(_index_default_query_parameters(module, sort_enabled=False))
elif path_kind == "group":
rv[-1]["description"] = "Group results using the provided attribute"
rv[-1]["schema"]["enum"] = module.grouping_allowed_fields
elif path_kind == "stats":
rv.extend(_stats_default_query_parameters(module))
return rv
def build_operation_common_responses(
self,
path_kind: str
) -> Dict[str, Any]:
rv = {}
if path_kind in ["read", "update", "delete"]:
rv["404"] = _def_errors["404"]
if path_kind in ["create", "update"]:
rv["422"] = _def_errors["422"]
if path_kind in ["index", "sample", "group", "stats"]:
rv["400"] = _def_errors["400"]
return rv
def build_index_schema(
self,
module: RESTModule,
item_schema: Dict[str, Any]
) -> Dict[str, Any]:
fields = {module.list_envelope: (List[Dict[str, Any]], ...)}
if module.serialize_meta:
fields[module.meta_envelope] = (MetaModel, ...)
schema, defs, nested = model_process_schema(
create_model(f"{module.__class__.__name__}Index", **fields),
model_name_map={MetaModel: "Meta"},
ref_prefix=None
)
schema["properties"][module.list_envelope]["items"] = item_schema
if module.serialize_meta:
schema["properties"][module.meta_envelope] = defs["Meta"]
schema["properties"][module.meta_envelope]["title"] = "Meta"
return schema
def build_group_schema(self, module: RESTModule) -> Dict[str, Any]:
fields = {module.groups_envelope: (List[Dict[str, Any]], ...)}
if module.serialize_meta:
fields[module.meta_envelope] = (MetaModel, ...)
schema, defs, nested = model_process_schema(
create_model(f"{module.__class__.__name__}Group", **fields),
model_name_map={MetaModel: "Meta"},
ref_prefix=None
)
schema["properties"][module.groups_envelope]["items"] = model_process_schema(
GroupModel,
model_name_map={},
ref_prefix=None
)[0]
schema["properties"][module.groups_envelope]["items"]["title"] = "Group"
if module.serialize_meta:
schema["properties"][module.meta_envelope] = defs["Meta"]
schema["properties"][module.meta_envelope]["title"] = "Meta"
return schema
def build_stats_schema(self, module: RESTModule) -> Dict[str, Any]:
fields = {"stats": (Dict[str, StatModel], ...)}
schema, defs, nested = model_process_schema(
create_model(f"{module.__class__.__name__}Stat", **fields),
model_name_map={StatModel: "Stat"},
ref_prefix=None
)
schema["properties"]["stats"]["additionalProperties"] = defs["Stat"]
return schema["properties"]["stats"]
def build_paths(
self,
module: RESTModule,
modules_tags: Dict[str, str],
serializers: Dict[Serializer, Dict[str, Any]],
parsers: Dict[Parser, Dict[str, Any]]
) -> Dict[str, Dict[str, Dict[str, Any]]]:
rv: Dict[str, Dict[str, Dict[str, Any]]] = {}
mod_name = module.name.rsplit('.', 1)[-1]
entity_name = module._openapi_specs.get("entity_name") or mod_name
mod_prefix: str = module.url_prefix or "/"
path_prefix: str = (
module.app._router_http._prefix_main + (
f"/{mod_prefix}" if not mod_prefix.startswith("/") else mod_prefix
)
)
for path_kind in set(module.enabled_methods) & {
"index",
"create",
"read",
"update",
"delete",
"sample",
"group",
"stats"
}:
path_relative, methods = module._methods_map[path_kind]
if not isinstance(methods, list):
methods = [methods]
path_scoped: str = path_prefix + path_relative
if path_scoped.endswith("/") and len(path_scoped) > 1:
path_scoped = path_scoped[:-1]
path_params = {}
for ptype, pname in _re_path_param.findall(path_scoped):
path_params[pname] = {"type": ptype, "optional": False}
path_scoped = path_scoped.replace(f"<{ptype}:{pname}>", f"{{{pname}}}")
for _, ptype, pname in _re_path_param_optional.findall(path_scoped):
path_params[pname]["optional"] = True
rv[path_scoped] = rv.get(path_scoped) or {}
serializer_obj = module._openapi_specs["serializers"].get(path_kind)
parser_obj = module._openapi_specs["parsers"].get(path_kind, module.parser)
for method in methods:
operation = self.build_operation_metadata(
module, modules_tags, path_kind, method
)
operation_parameters = self.build_operation_parameters(
module, path_kind, path_params
)
operation_responses = self.build_operation_common_responses(path_kind)
if operation_parameters:
operation["parameters"] = operation_parameters
if (
path_kind in ["create", "update"] or (
path_kind == "delete" and
"delete" in module._openapi_specs["parsers"]
)
):
operation["requestBody"] = {
"content": {
"application/json": {
"schema": parsers[parser_obj]["schema"]
}
}
}
if path_kind in ["create", "read", "update"]:
serializer_obj = module._openapi_specs["serializers"][path_kind]
response_code = "201" if path_kind == "create" else "200"
descriptions = {
"create": "Resource created",
"read": "Resource",
"update": "Resource updated"
}
operation_responses[response_code] = {
"description": descriptions[path_kind],
"content": {
"application/json": {
"schema": serializers[serializer_obj]["schema"]
}
}
}
elif path_kind in ["index", "sample"]:
operation_responses["200"] = {
"description": (
"Resource list" if path_kind == "index" else
"Resource random list"
),
"content": {
"application/json": {
"schema": self.build_index_schema(
module,
serializers[serializer_obj]["schema"]
)
}
}
}
elif path_kind == "delete":
operation_responses["200"] = {
"description": "Resource deleted",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {}
}
}
}
}
elif path_kind == "group":
operation_responses["200"] = {
"description": "Resource groups",
"content": {
"application/json": {
"schema": self.build_group_schema(module)
}
}
}
elif path_kind == "stats":
operation_responses["200"] = {
"description": "Resource stats",
"content": {
"application/json": {
"schema": self.build_stats_schema(module)
}
}
}
if operation_responses:
operation["responses"] = operation_responses
rv[path_scoped][method] = operation
for path_name, path_target, path_data in (
module._openapi_specs["additional_routes"]
):
methods = path_data.methods
for path_relative in path_data.paths:
path_scoped: str = path_prefix + path_relative
if path_scoped.endswith("/") and len(path_scoped) > 1:
path_scoped = path_scoped[:-1]
path_params = {}
for ptype, pname in _re_path_param.findall(path_scoped):
path_params[pname] = {"type": ptype, "optional": False}
path_scoped = path_scoped.replace(
f"<{ptype}:{pname}>", f"{{{pname}}}"
)
for _, ptype, pname in _re_path_param_optional.findall(path_scoped):
path_params[pname]["optional"] = True
rv[path_scoped] = rv.get(path_scoped) or {}
for method in methods:
operation = {
"summary": getattr(
path_target,
"_openapi_desc_summary",
f"{{name}} {path_name.rsplit('.', 1)[-1]}"
).format(name=entity_name),
"description": getattr(
path_target,
"_openapi_desc_description",
""
).format(name=entity_name),
"operationId": f"{path_name}.{method}".replace(".", "_"),
"tags": [modules_tags[module.name]]
}
operation_parameters = self.build_operation_parameters(
module, "custom", path_params
)
operation_responses = {}
if operation_parameters:
operation["parameters"] = operation_parameters
operation_request = getattr(
path_target, "_openapi_def_request", None
)
if operation_request:
schema = build_schema_from_fields(
module,
operation_request["fields"]
)[0]
for file_param in operation_request["files"]:
schema["properties"][file_param] = {
"type": "string",
"format": "binary"
}
operation["requestBody"] = {
"content": {
operation_request["content"]: {
"schema": schema
}
}
}
else:
parser = getattr(
path_target, "_openapi_def_parser", module.parser
)
if parser in parsers:
schema = parsers[parser]["schema"]
else:
schema = self.build_schema_from_parser(module, parser)[0]
operation["requestBody"] = {
"content": {
"application/json": {
"schema": schema
}
}
}
operation_responses = {}
defined_responses = getattr(
path_target, "_openapi_def_responses", None
)
if defined_responses:
for status_code, defined_response in defined_responses.items():
schema = build_schema_from_fields(
module,
defined_response["fields"]
)[0]
operation_responses[status_code] = {
"content": {
defined_response["content"]: {
"schema": schema
}
}
}
else:
serializer = getattr(
path_target, "_openapi_def_serializer", module.serializer
)
if serializer in serializers:
schema = serializers[serializer]["schema"]
else:
schema = self.build_schema_from_serializer(
module, serializer
)[0]
operation_responses["200"] = {
"content": {
"application/json": {
"schema": schema
}
}
}
defined_resp_errors = getattr(
path_target, "_openapi_def_response_codes", []
)
for status_code in defined_resp_errors:
operation_responses[status_code] = _def_errors[status_code]
if operation_responses:
operation["responses"] = operation_responses
rv[path_scoped][method] = operation
return rv
def __call__(self, produce_schemas: bool = False) -> Dict[str, Any]:
data: Dict[str, Any] = {"openapi": self.openapi_version, "info": self.info}
components: Dict[str, Dict[str, Any]] = {}
paths: Dict[str, Dict[str, Any]] = {}
definitions: Dict[str, Dict[str, Any]] = {}
if self.servers:
data["servers"] = self.servers
for module in self.modules:
defs = self.build_definitions(module)
# tags.append({
# "name": module.name,
# "description": module.name.split(".")[-1].title()
# })
paths.update(
self.build_paths(
module,
self.modules_tags,
defs["serializers"],
defs["parsers"]
)
)
definitions[module.name] = defs
if definitions and produce_schemas:
components["schemas"] = {
v["model"]: definitions[k]["schema"]
for k, v in sorted(definitions.items(), key=lambda i: i[1]["model"])
}
if self.security_schemes:
components["securitySchemes"] = self.security_schemes
if components:
data["components"] = components
data["paths"] = paths
if self.tags:
data["tags"] = self.tags
return OpenAPI(**data).dict(by_alias=True, exclude_none=True)
def build_schema(
*,
title: str,
version: str,
openapi_version: str = "3.0.2",
description: Optional[str] = None,
modules: List[RESTModule],
modules_tags: Dict[str, str],
produce_schemas: bool = False,
tags: Optional[List[Dict[str, Any]]] = None,
servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
terms_of_service: Optional[str] = None,
contact: Optional[Dict[str, Union[str, Any]]] = None,
license_info: Optional[Dict[str, Union[str, Any]]] = None,
security_schemes: Optional[Dict[str, Any]] = None,
generator_cls: Optional[Type[OpenAPIGenerator]] = None
) -> Dict[str, Any]:
generator_cls = generator_cls or OpenAPIGenerator
generator = generator_cls(
title=title,
version=version,
openapi_version=openapi_version,
description=description,
modules=modules,
modules_tags=modules_tags,
tags=tags,
servers=servers,
terms_of_service=terms_of_service,
contact=contact,
license_info=license_info,
security_schemes=security_schemes
)
return generator(produce_schemas=produce_schemas)
| UTF-8 | Python | false | false | 35,890 | py | 30 | generation.py | 24 | 0.487991 | 0.485288 | 0 | 968 | 36.076446 | 87 |
SHUwangwei/leetcode | 10,728,828,338,484 | 6be6d8f020f93921bc8c6ede8814666d0d1efd5b | 42af135232387090b629d12150bf67c537bc9e3d | /1023.py | e38b00db107f7b21e6bfa7512bbd99dbc042d0f5 | []
| no_license | https://github.com/SHUwangwei/leetcode | 19205b024f7ad329427d714f5248186a203734a2 | bd93367081a752aa8cf3689944921f4392df1974 | refs/heads/master | 2022-02-27T11:10:05.606379 | 2022-02-18T01:50:37 | 2022-02-18T01:50:37 | 158,354,279 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def camelMatch2(self, queries, pattern):
import re
newpattern = '^([a-z\s]*?)'
for p in pattern:
newpattern += p+'([a-z\s]*?)'
newpattern += '$'
res = []
for query in queries:
match = re.search(newpattern, query)
if match:
res.append(True)
else:
res.append(False)
return res
def camelMatch(self, queries, pattern):
ans = list()
l = len(pattern)
for query in queries:
pos = 0
flag = True
for ch in query:
if pos < l and ch == pattern[pos]:
pos += 1
else:
if "A" <= ch <= "Z":
flag = False
break
if pos < l:
flag = False
ans.append(flag)
return ans
s = Solution()
s.camelMatch([], 'FB') | UTF-8 | Python | false | false | 976 | py | 50 | 1023.py | 50 | 0.405738 | 0.402664 | 0 | 38 | 24.710526 | 50 |
henryoliver/algo-expert-solutions | 4,621,384,825,121 | a94b4667df66301ae74d38059d3191ab966c29ae | 2e26cd20d96764bfe4e3dd29ece1a96cc2461bb8 | /Arrays/smallest-difference.py | 0345114b0c00db71470723b2efb19e2a05a3b247 | [
"MIT"
]
| permissive | https://github.com/henryoliver/algo-expert-solutions | 35ac4df09b83d5fef31c1695fc8404a274312ca9 | c45623861d97862fa62e2461a56d3946adde7e22 | refs/heads/master | 2022-08-28T15:18:55.903334 | 2020-05-26T12:27:34 | 2020-05-26T12:27:34 | 266,343,724 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Smallest Difference
# def smallestDifference(arrayOne=[], arrayTwo=[]):
# '''
# Solution 1 - Brute force (aka: Naive approach)
#
# O(n^2) time | O(1) space
#
# arrayOne: a list of integers
# arrayTwo: a list of integers
# return: a list of two integers
# '''
# closestPair = []
# closestNumber = float('inf')
#
# for firstNumber in arrayOne:
# for secondNumber in arrayTwo:
# absoluteDiff = abs(firstNumber - secondNumber)
#
# if (absoluteDiff < closestNumber):
# closestNumber = absoluteDiff
# closestPair = [firstNumber, secondNumber]
#
# return closestPair
def smallestDifference(arrayOne=[], arrayTwo=[]):
'''
Solution 2 - Sorting along with the two-pointer sliding window approach
O(n log(n) + m log(m)) time | O(1) space
arrayOne: a list of integers
arrayTwo: a list of integers
return: a list of two integers
'''
arrayOne.sort()
arrayTwo.sort()
closestPair = []
closestNumber = float('inf')
arrayOnePointer = 0;
arrayTwoPointer = 0;
while (arrayOnePointer < len(arrayOne) and arrayTwoPointer < len(arrayTwo)):
firstNumber = arrayOne[arrayOnePointer]
secondNumber = arrayTwo[arrayTwoPointer]
currentAbsDiff = abs(firstNumber - secondNumber)
if (firstNumber == secondNumber):
closestPair = [firstNumber, secondNumber]
break
if (currentAbsDiff < closestNumber):
closestNumber = currentAbsDiff
closestPair = [firstNumber, secondNumber]
if (firstNumber < secondNumber):
arrayOnePointer += 1
elif (secondNumber < firstNumber):
arrayTwoPointer += 1
return closestPair
# Test cases (black box - unit testing)
testCases = [
{ 'assert': smallestDifference([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]), 'expected': [28, 26] },
{ 'assert': smallestDifference([-1, 5, 10, 20, 3], [26, 134, 135, 15, 17]), 'expected': [20, 17] },
{ 'assert': smallestDifference([10, 0, 20, 25], [1005, 1006, 1014, 1032, 1031]), 'expected': [25, 1005] },
# Boundary conditions (empty lists, singleton list, large numbers, small numbers)
{ 'assert': smallestDifference(), 'expected': [] },
{ 'assert': smallestDifference([]), 'expected': [] },
{ 'assert': smallestDifference([], []), 'expected': [] },
{ 'assert': smallestDifference([1], [1]), 'expected': [1, 1] },
{ 'assert': smallestDifference([1, 2, 3, 4]), 'expected': [] },
{ 'assert': smallestDifference([-1, -1, -1], [-1, -1, -1]), 'expected': [-1, -1] },
# Extremes
]
# Run tests
for (index, test) in enumerate(testCases):
print(f'# Test {index + 1}')
print(f'Actual: {test["assert"]}')
print(f'Expected: {test["expected"]}')
print('🤘 Test PASSED 🤘' if test["assert"] == test["expected"] else '👎 Test FAILED 👎', '\n')
| UTF-8 | Python | false | false | 2,945 | py | 18 | smallest-difference.py | 17 | 0.596318 | 0.559836 | 0 | 88 | 32.318182 | 110 |
puter/trucking | 12,635,793,833,836 | 04d35f41990d319f914f45fa0c470be95156ff10 | 6e9b22da8cd32ab0795eadad8a80ab615a121fe7 | /service/migrations/0002_auto_20160925_1706.py | 06b5263b9e3536479db08efb82954e1dfe7ea173 | []
| no_license | https://github.com/puter/trucking | 0e7fc0dc2a2044106dd614870d692519b4210201 | bcaac15df645147cb4f80c8eab25b70829435d37 | refs/heads/master | 2019-08-09T16:30:26.635336 | 2017-04-30T17:27:19 | 2017-04-30T17:27:19 | 67,470,285 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('service', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('miles', models.FloatField()),
('serviceDate', models.DateField()),
],
),
migrations.CreateModel(
name='Part',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.TextField(max_length=63)),
('description', models.TextField(max_length=63)),
],
options={
'ordering': ['name'],
},
),
migrations.AddField(
model_name='truck',
name='last_serviced',
field=models.DateField(default=datetime.date.today),
),
migrations.AddField(
model_name='part',
name='truck',
field=models.ForeignKey(to='service.Truck'),
),
migrations.AddField(
model_name='entry',
name='parts',
field=models.ManyToManyField(to='service.Part'),
),
migrations.AddField(
model_name='entry',
name='truck',
field=models.ForeignKey(to='service.Truck'),
),
]
| UTF-8 | Python | false | false | 1,633 | py | 47 | 0002_auto_20160925_1706.py | 28 | 0.511329 | 0.505818 | 0 | 54 | 29.240741 | 114 |
nikostuoc/Novartis2020 | 5,523,327,980,388 | 76757eff40f3cfbc3bd4300eda936d8cbf663847 | dc507bcfa20dc9f50440a92997ea346a7e573018 | /pre-datathon/models/ci_metric_optimizer_cb.py | bfd1434968554efe854d35ebfd308c566068d9a3 | [
"MIT"
]
| permissive | https://github.com/nikostuoc/Novartis2020 | bd81d008909f7e3610555e7a5c9d80c3953e99b3 | 390f34efa6bbc1e168f4e58d2d335c7cfa7d865e | refs/heads/main | 2023-03-18T11:32:25.640206 | 2020-11-30T14:16:49 | 2020-11-30T14:16:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
####################
#
# This doesn't work. Catboost works with "batches", or on the fly,
# and we need the full vector to compute gradient.
#
#
####################
import pandas as pd
import numpy as np
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from tools.simple_metrics import (
interval_score_loss,
interval_score_metric,
interval_score_objective,
duplicate_df
)
from tools.catboost_custom import IntervalScoreObjective, IntervalScoreMetric
if __name__ == "__main__":
# Load data and split
train_raw = pd.read_csv("data/feature_engineered/train_1.csv")
# test_raw = pd.read_csv("data/feature_engineered/test_1.csv")
train, val = train_test_split(
train_raw,
random_state=42
)
# Create duplicated datasets - we'll be able to predict upper and lower bound,
# one for each of the replications
print(train.shape)
train = duplicate_df(train)
val = duplicate_df(val)
# Clean data
to_drop = ['target', 'Cluster', 'brand_group', 'cohort', 'Country']
train_x = train.drop(columns=to_drop)
train_y = train.target
val_x = val.drop(columns=to_drop)
val_y = val.target
error = {}
error_train ={}
for n_estimators in [10000]:
print("-" * 20)
print(n_estimators)
# Create and fit lgbm - use interval_score_objective
cb = CatBoostRegressor(loss_function=IntervalScoreObjective(),
eval_metric="MAE",
n_estimators=n_estimators,
verbose=0)
len_real_train = int(len(train_y) / 2)
weights = np.ones(len(train_y))
weights[0:len_real_train] += .0001
weights[len_real_train:len(train_y)] -= .0001
cb.fit(
train_x, train_y, sample_weight=weights
)
# Predict duplicates
preds = cb.predict(val_x)
# Split lower and upper bounds
len_real_val = int(len(val_y) / 2)
lower_bound_preds = preds[len_real_val:]
upper_bound_preds = preds[:len_real_val]
# Get real target
y_real = val_y[len_real_val:]
error[n_estimators] = interval_score_loss(
lower_bound_preds,
upper_bound_preds,
y_real,
alpha=0.25
)
# Compute loss
print(
error[n_estimators]
)
# Predict duplicates
preds_train = cb.predict(train_x)
# Split lower and upper bounds
len_real_train = int(len(train_y) / 2)
lower_bound_preds = preds_train[len_real_train:]
upper_bound_preds = preds_train[:len_real_train]
# Get real target
y_real = train_y[len_real_train:]
error_train[n_estimators] = interval_score_loss(
lower_bound_preds,
upper_bound_preds,
y_real,
alpha=0.25
)
# Compute loss
print(
error_train[n_estimators]
)
# 550-ish
print(error)
print(error_train)
# Valid error with n trees
# {100: 1543.2253280792577, 300: 1412.4471969337526, 500: 1308.6674361817093,
# 1000: 1227.963743360362, 1500: 1173.3121939442262}
# Train error with n trees
# {100: 1383.4605985212715, 300: 1279.224563537674, 500: 1187.3307309950724,
# 1000: 1117.5252313250912, 1500: 1073.8336250693296}
# Val - train 10k trees
# {10000: 903.3864766737887}
# {10000: 836.0708863713029}
| UTF-8 | Python | false | false | 3,566 | py | 67 | ci_metric_optimizer_cb.py | 41 | 0.57908 | 0.500841 | 0 | 133 | 25.804511 | 82 |
KtechB/machina | 15,066,745,296,801 | 70bf06b6b988d122593620d69e9e323494d68a87 | 6937a62ed792ce161231b84591ff791a51b70dfe | /machina/__init__.py | e23cee99fb42497b40b287fc9f435f559ebfa8f7 | [
"MIT"
]
| permissive | https://github.com/KtechB/machina | 75bfb3ed48675649afef81f01f498c1af3d880f6 | 24eca9cc9b89a0e0b9e026282f17c7b9fe2869ab | refs/heads/master | 2020-09-22T08:24:40.454344 | 2019-12-05T06:54:05 | 2019-12-05T06:54:05 | 225,120,598 | 1 | 0 | MIT | true | 2019-12-01T07:06:16 | 2019-12-01T07:06:16 | 2019-11-21T08:34:52 | 2019-11-16T12:38:22 | 7,185 | 0 | 0 | 0 | null | false | false | import pkg_resources
__version__ = pkg_resources.get_distribution('machina-rl').version
from machina import algos # NOQA
from machina import envs # NOQA
from machina import models # NOQA
from machina import noise # NOQA
from machina import optims # NOQA
from machina import pds # NOQA
from machina import pols # NOQA
from machina import prepro # NOQA
from machina import samplers # NOQA
from machina import traj # NOQA
from machina import vfuncs # NOQA
| UTF-8 | Python | false | false | 468 | py | 140 | __init__.py | 89 | 0.760684 | 0.760684 | 0 | 17 | 26.529412 | 66 |
mekhami/studychess | 9,302,899,206,881 | 202c89ef9be25d479e1e8b45358df4022588955f | 92871972affa492c17987f5615c1f919d1b7434d | /studychess/core/models.py | 3ac9271cc32217a8931ddb40d3ffa0b1df7b7a08 | []
| no_license | https://github.com/mekhami/studychess | 2f9c0416e09b157a1fda844e17409eda7fdb4374 | 47e31578312e4109c5c4987550344e6747ee8cad | refs/heads/master | 2020-04-06T04:14:57.015166 | 2017-04-16T01:57:51 | 2017-04-16T01:57:51 | 82,981,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from django.db import models
from channels import Group
from channels.binding.websockets import WebsocketBinding
import logging
logger = logging.getLogger(__name__)
class Post(models.Model):
SITE_CHOICES = (
('Lichess', 'Lichess'),
('Chess.com', 'Chess.com'),
)
name = models.CharField(max_length=255)
time = models.DateTimeField(auto_now=True)
description = models.TextField()
site = models.CharField(max_length=50, choices=SITE_CHOICES)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
message = {'type': 'add', 'players': [self.as_dict()]}
print(message)
Group('listeners').send({
'text': json.dumps(message),
})
def as_dict(self):
return {
'pk': self.pk,
'name': self.name,
'description': self.description,
'site': self.site
}
def delete(self):
logger.info('called delete on instance {}'.format(self.name))
Group("listeners").send({
'text': json.dumps({'player': self.pk, 'type': 'delete'})
})
super().delete()
class PostBinding(WebsocketBinding):
model = Post
stream = "post"
fields = ["name", "description", "site"]
@classmethod
def group_names(cls, instance):
return ["post-updates"]
def has_permission(self, user, action, pk):
return True
model = Post
stream = "post"
fields = ["name", "description", "site"]
| UTF-8 | Python | false | false | 1,568 | py | 14 | models.py | 5 | 0.577806 | 0.574617 | 0 | 62 | 24.290323 | 69 |
chrisrioux2/CLEBlog | 4,844,723,148,358 | b13763b83c2d4b33dfcedab5e26b45a3d4f6eac6 | 17a816ff2c033a9e75ad8ad94f5750b1d6d82ca6 | /venv/lib/python2.7/ntpath.py | 20a224db13d3436fb3dde212aadc9c9b5afabf6f | []
| no_license | https://github.com/chrisrioux2/CLEBlog | 43669bbe72c6a766f6886a33e5d76297523027d6 | 594436f1a84ec21c56fd474683a9c9dc5a07cee2 | refs/heads/master | 2015-08-17T10:08:20.997914 | 2014-11-27T22:59:19 | 2014-11-27T22:59:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /Users/Shuffleberries/anaconda/lib/python2.7/ntpath.py | UTF-8 | Python | false | false | 54 | py | 23 | ntpath.py | 22 | 0.851852 | 0.814815 | 0 | 1 | 54 | 54 |
hkingravi/KMVis | 1,563,368,139,223 | d15521d56675d226d88047426b936cc968935683 | f91caf318a6c6849a37dac1df01f7383201cf304 | /examples/kernel_example.py | de584b4b5c1ae8e30d92cf9c9165e3fc07dc7876 | []
| no_license | https://github.com/hkingravi/KMVis | 3b713dd42079b60f4876fabaab1124628097f435 | 40106c924d1e216c4720a9f6df6468e89f271f5e | refs/heads/master | 2021-01-22T10:14:58.414301 | 2014-07-08T02:40:42 | 2014-07-08T02:40:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # this script tests KernelType and kernel
# numpy stuff
import numpy as np
from scipy.io import savemat
# matplotlib stuff
from matplotlib import rc
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# add class directory to path
import sys
sys.path.insert(0, '../src/core')
sys.path.insert(0, '../src/exceptions')
sys.path.insert(0, '../data/examples')
# our imports
from kernel import kernel
from KernelType import KernelType
from Error import *
# boolean which tells program to save output so it
# can be used by unit testing framework
save_data = False;
# initialize kernels
k_name1 = "gaussian"
k_name2 = "sigmoid"
k_name3 = "polynomial"
k_name4 = "laplacian"
k_name5 = "cauchy"
k_name6 = "wrongname"
k_name7 = "periodic"
k_name8 = "locally_periodic"
k_params1 = np.array( [1.2] )
k_params2 = np.array( [0.5, 1.2] )
k_params3 = np.array( [2, 0] )
k_params4 = np.array( [1.2] )
k_params5 = np.array( [1.2] )
k_params7 = np.array( [1.2, 0.5] )
k_params8 = np.array( [0.5, 1] )
try:
k1 = KernelType(k_name1,k_params1)
except Exception, e:
print e.args
try:
k2 = KernelType(k_name2,k_params2)
except Exception, e:
print e.args
try:
k3 = KernelType(k_name3,k_params3)
except Exception, e:
print e.args
try:
k4 = KernelType(k_name4,k_params4)
except Exception, e:
print e.args
try:
k5 = KernelType(k_name5,k_params5)
except Exception, e:
print e.args
try:
k6 = KernelType(k_name6,k_params5)
except KernelTypeError, e:
s = str(e)
print s
try:
k7 = KernelType(k_name7,k_params7)
except Exception, e:
print e.args
try:
k8 = KernelType(k_name8,k_params8)
except Exception, e:
print e.args
# print the names of the kernels
print k1
print k2
print k3
print k4
print k5
print k7
# now, generate plots for kernels
x = np.arange(-5,5,0.1)
x_rad = np.arange(-3,7,0.1)
y = np.array([2]) # y vals
k_gauss = kernel(x_rad,y,k1)
k_sigm = kernel(x,y,k2)
k_poly = kernel(x,y,k3)
k_lap = kernel(x_rad,y,k4)
k_cauchy = kernel(x_rad,y,k5)
k_periodic = kernel(x_rad,y,k7)
k_locally_periodic = kernel(x_rad,y,k8)
# save files in test data directory
if save_data:
savemat('../data/unit_tests/test_kernel.mat',
{'k_gauss':k_gauss,'k_sigm':k_sigm,'k_poly':k_poly,'k_lap':k_lap,
'k_cauchy':k_cauchy,'k_periodic':k_periodic,'k_locally_periodic':k_locally_periodic})
# plot Gaussian kernel values
# turns on Tex
rc('text', usetex=True)
rc('font', family='serif')
fig = plt.figure()
ax = fig.gca()
ax.plot(x,k_gauss,'b', linewidth=2.5)
ax.set_title(r"Gaussian kernel with $\sigma = 1.2$",fontsize=20)
fig2 = plt.figure()
ax2 = fig2.gca()
ax2.plot(x,k_sigm,'b', linewidth=2.5)
ax2.set_title(r"Sigmoid kernel with $\alpha = 0.5, c = 1.2$",fontsize=20)
fig3 = plt.figure()
ax3 = fig3.gca()
ax3.plot(x,k_poly,'b', linewidth=2.5)
ax3.set_title(r"Polynomial kernel with $\gamma = 2, c = 0$",fontsize=20)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,k_lap,'b', linewidth=2.5)
ax.set_title(r"Laplacian kernel with $\sigma = 1.2$",fontsize=20)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,k_cauchy,'b', linewidth=2.5)
ax.set_title(r"Cauchy kernel with $\sigma = 1.2$",fontsize=20)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,k_periodic,'b', linewidth=2.5)
ax.set_title(r"Periodic kernel with $\sigma = 1.2$, $p = 0.5$",fontsize=20)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,k_locally_periodic,'b', linewidth=2.5)
ax.set_title(r"Locally Periodic kernel with $\sigma = 1.2$, $p = 0.5$",fontsize=20)
plt.draw()
plt.show() | UTF-8 | Python | false | false | 3,536 | py | 38 | kernel_example.py | 37 | 0.667138 | 0.626697 | 0 | 149 | 22.738255 | 98 |
Jefersonnnn/URIOnlineJudge | 15,831,249,490,076 | 4650a4629caa1e89d09ed85343ea4853a4248c72 | 23f97feaf08623c4bcca0dd9051903b47465e2fd | /desafios/triangulos.py | 4f6490edd158ae51c6eb665558774c85bdcca9fd | []
| no_license | https://github.com/Jefersonnnn/URIOnlineJudge | ff1f2d1467fa52b1e49561ec53b6b69b2899707e | 166e85e9298e479ce5064e2b435414534acaa22e | refs/heads/master | 2023-04-01T05:39:06.639041 | 2023-03-11T00:48:18 | 2023-03-11T00:48:18 | 352,818,135 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Complete a função verificaTriangulo, que recebe 3 valores inteiros como parâmetro,
considerando que esses valores representam os 3 lados de um triângulo. Como resultado,
o método deve informar se os valores realmente correspondem a um triângulo,
e se o triângulo é equilátero, isósceles ou escaleno.
A saída deve ser apenas um número, conforme o demonstrado abaixo:
0: não é um triângulo
1: equilátero # "triângulo que possui três lados com medidas iguais."
2: isósceles # "triângulo que possui dois lados com medidas iguais"
3: escaleno # "triângulo que possui todos os lados com medidas diferentes"
Veja mais sobre "O que é triângulo?" em: https://brasilescola.uol.com.br/o-que-e/matematica/o-que-e-triangulo.htm
Condição do triângulo
Só irá existir um triângulo se, somente se, os seus lados obedeceram à seguinte regra:
um de seus lados deve ser maior que o valor absoluto (módulo) da diferença dos outros dois lados
e menor que a soma dos outros dois lados.
| b - c | < a < b + c
| a - c | < b < a + c
| a - b | < c < a + b
"""
def validaTriangulo(ladoA: int, ladoB: int, ladoC: int) -> bool:
if abs(ladoB - ladoC) < ladoA < ladoB + ladoC:
if abs(ladoA - ladoC) < ladoB < ladoA + ladoC:
if abs(ladoA - ladoB) < ladoC < ladoA + ladoB:
return True
return False
def verificaTriangulo(lado_a: int, lado_b: int, lado_C: int) -> int:
if not validaTriangulo(lado_a, lado_b, lado_C):
return 0
isEquilatero = lado_a == lado_b == lado_C
isIsosceles = lado_a == lado_b or lado_a == lado_C or lado_b == lado_C
isEscaleno = lado_a != lado_b != lado_C
if isEquilatero:
return 1
if isIsosceles:
return 2
if isEscaleno:
return 3
if __name__ == '__main__':
tipos_triangulo = ("Não é um triângulo", "Equilátero", "Isósceles", "Escaleno")
lados_triangulo = input("Digite os três lado do triangulo separado por virgula (,) [Ex: 5,10,9]: ")
lados_triangulo = lados_triangulo.split(',')
ladoA = int(lados_triangulo[0])
ladoB = int(lados_triangulo[1])
ladoC = int(lados_triangulo[2])
class_triangulo = verificaTriangulo(ladoA, ladoB, ladoC)
print(f"Resultado: {tipos_triangulo[class_triangulo]}")
| UTF-8 | Python | false | false | 2,274 | py | 19 | triangulos.py | 18 | 0.67576 | 0.668157 | 0 | 63 | 34.492063 | 113 |
ssebadduka/cuniculture_app | 14,156,212,221,328 | 295521e0f3d4ab8e566dff07e83d8213864eb066 | bd48b3c60328f9c37fdcce5adcf7ee84218a40be | /cuniculture_app/views/farm_views.py | 94a15614708dce7d1ad6fc6a0965053eabfb353d | []
| no_license | https://github.com/ssebadduka/cuniculture_app | 95384f6dc128cca1883e674155100acd6a851aec | 00d806acf8e30fdd0dec9097021d81e66ee7d4d5 | refs/heads/master | 2023-02-25T19:23:05.178697 | 2021-02-01T07:46:05 | 2021-02-01T07:46:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from cuniculture_app.forms.farm_forms import FarmForm
from cuniculture_app.selectors.farms_selectors import get_farms, get_farm
def manage_farms(request):
farms = get_farms()
farm_form = FarmForm()
if request.method == "POST":
farm_form = FarmForm(request.POST, request.FILES)
if farm_form.is_valid():
farm_form.save()
messages.success(request, 'Farm Record saved Successfully!')
else:
messages.warning(request, 'Operation Not Successfull')
return HttpResponseRedirect(reverse(manage_farms))
context = {
"farm": "active",
"farms": farms,
"farm_form":farm_form
}
return render (request, "farms/manage_farms.html", context)
def edit_farm_view(request, farm_id):
farm = get_farm(farm_id)
farm_form = FarmForm(instance=farm)
if request.method == "POST":
farm_form = FarmForm(request.POST, request.FILES)
if farm_form.is_valid():
farm_form.save()
messages.success(request, 'Changes saved Successfully!')
else:
messages.warning(request, 'Operation Not Successfull')
return HttpResponseRedirect(reverse(manage_farms))
def delete_farm_view(request, farm_id):
farm = get_farm(farm_id)
farm.delete()
messages.success(request, 'Farm Deleted Successfull')
return HttpResponseRedirect(reverse(manage_farms))
| UTF-8 | Python | false | false | 1,635 | py | 19 | farm_views.py | 19 | 0.634251 | 0.634251 | 0 | 53 | 28.849057 | 73 |
spdx/tools-python | 7,232,724,945,603 | 559285e4360eb0d09742c9eab873bd74d21d3068 | bfc42c114f652012b6cfd14e7cccf52cb6b9ac7e | /src/spdx_tools/spdx3/writer/console/annotation_writer.py | 3261a69bd29902db7789e58949beecae6f92e0e2 | [
"Apache-2.0",
"GPL-2.0-only"
]
| permissive | https://github.com/spdx/tools-python | 05a952501af2ac608678cb1737f7c661f6091fa2 | 777bd274dd06cb24342738df7da5ab285d652350 | refs/heads/main | 2023-08-31T09:39:52.930063 | 2023-08-24T06:39:48 | 2023-08-24T10:22:33 | 32,761,058 | 147 | 136 | Apache-2.0 | false | 2023-09-14T15:50:59 | 2015-03-23T21:54:39 | 2023-09-08T16:55:58 | 2023-09-14T15:50:57 | 3,593 | 138 | 141 | 57 | Python | false | false | # SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from beartype.typing import TextIO
from spdx_tools.spdx3.model import Annotation
from spdx_tools.spdx3.writer.console.console import write_value
from spdx_tools.spdx3.writer.console.element_writer import write_element_properties
def write_annotation(annotation: Annotation, text_output: TextIO):
text_output.write("## Annotation\n")
write_element_properties(annotation, text_output)
for property_name in Annotation.__annotations__.keys():
write_value(property_name, getattr(annotation, property_name), text_output)
| UTF-8 | Python | false | false | 628 | py | 378 | annotation_writer.py | 354 | 0.780255 | 0.765924 | 0 | 16 | 38.25 | 83 |
martyone/btts | 17,755,394,831,262 | b6680418096b1255c6654add6026fa9233898eb6 | db42ec397e3e06a199b0842f295ddc4ebb0f4483 | /lib/python/btts/voicecall.py | ff3f130caeb5c52f4854b392e4312fff991e3353 | []
| no_license | https://github.com/martyone/btts | 4c53a0f273dd19a00adbc14816978743f846e74e | 9b39cf1e157554e6a14bfedf1ab82edc3fc3f12b | refs/heads/master | 2021-01-16T00:04:59.717605 | 2014-05-02T10:16:44 | 2014-05-05T06:13:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# BTTS - BlueTooth Test Suite
#
# Copyright (C) 2014 Jolla Ltd.
# Contact: Martin Kampas <martin.kampas@jollamobile.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dbus
import btts
class VoiceCall:
class Error(Exception):
pass
class NotReadyError(Error):
def __init__(self):
VoiceCall.Error.__init__(self, 'Not ready')
def __init__(self):
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object('org.ofono', '/'),
'org.ofono.Manager')
self._modem_object = None
device_address = btts.Config().device.upper()
modems = manager.GetModems()
for path, properties in modems:
try:
if (properties['Type'] == 'hfp' and
properties['Serial'] == device_address):
self._modem_object = bus.get_object('org.ofono', path)
break
except KeyError:
pass
def __getattr__(self, member):
self._ensure_ready()
if member.startswith('__') and member.endswith('__'):
raise AttributeError(member)
else:
return self._modem_object.get_dbus_method(
member, 'org.ofono.VoiceCallManager')
def _ensure_ready(self):
if not self._modem_object:
raise self.NotReadyError()
| UTF-8 | Python | false | false | 1,927 | py | 110 | voicecall.py | 32 | 0.607161 | 0.604567 | 0 | 60 | 31.116667 | 74 |
deepakskosanam/montys-codes | 19,327,352,863,732 | e9aa6c2b7620ea3ad3fe7ada49c8f18927265a2f | fbcc62632fed3dcbd00498cba2a4247b4455a111 | /tutorials/tutorial_5.py | d263e624516bf416d4b29621ab4a8c6564e87cde | []
| no_license | https://github.com/deepakskosanam/montys-codes | 38d0ed05c24c5c3f4fe9badf46e26cea1918682e | b22fa451c33e6b798f6482ab5702cb23479700b5 | refs/heads/master | 2020-05-01T07:04:48.035081 | 2019-04-07T08:23:13 | 2019-04-07T08:23:13 | 177,344,461 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # PYTHON TUTORIALS 5
""" DATA STRUCTURES """
# More on Lists: Methods
"""
list.append(x) # Equals a[len(a):] = [x]
list.extend(x) # Equals a[len(a):] = iterable
list.insert(i.x)
# a.insert(0,x) inserts at the front of List
# a.insert(len(a),x) inserts at end of list. Equals a.append(x)
list.remove(x) # Removes first occurance of value "x"
list.pop(i) # Removes and returns i-th element. No "i" removes last element.
list.clear() # Removes all elements. Equals "del a[:]"
list.index(x[,start[,end]]) # First Item with value "x". Options narrow subsequence
list.count(x) # Count of value "x"
list.sort(key = none, reverse = False) #sort items in place (SEE SORTED())
list.reverse() # reverse elements in List
list.copy() # Return a shallow copy of the List. Equals a[:]
SHALLOW COPY: References original list values. Editing copy will modify original.
DEEP COPY: Complete and independent copy of original list.
"""
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
fruits.count('apple')
fruits.count('tangerine')
fruits.index('banana')
fruits.index('banana', 4) # Find next banana starting a position 4
fruits.reverse()
fruits
fruits.append('grape')
fruits
fruits.sort()
fruits
fruits.pop()
# USE .append and .pop to add/remove elements to the end of Lists
# Add/remove from both ends of a List
# USE collections.deque
from collections import deque
queue = deque(["Eric", "John", "Michael"])
queue.append("Terry") # Terry arrives
queue.append("Graham") # Graham arrives
queue.popleft() # The first to arrive now leaves
queue.popleft() # The second to arrive now leaves
queue # Remaining queue in order of arrival
| UTF-8 | Python | false | false | 1,733 | py | 3 | tutorial_5.py | 3 | 0.678015 | 0.675707 | 0 | 52 | 32.326923 | 83 |
ThonyPrice/MLcourse | 9,706,626,109,755 | 5730d550d4950e1b387ef65d339676532e2b1afe | d713b6aba783a9c9cfbcfa4dc07674e6fa5992c3 | /ML_2/main.py | a1b13f662f146ae394b6d87b2aa933e72228b0a2 | []
| no_license | https://github.com/ThonyPrice/MLcourse | bf264511af30d3d8421673cdb487f28a3b0ac136 | 6d258690e47326b6c3e8ebda45aead3ce4a3c4a4 | refs/heads/master | 2020-03-28T06:03:10.848496 | 2018-09-07T10:54:18 | 2018-09-07T10:54:18 | 147,811,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from cvxopt.solvers import qp
from cvxopt.base import matrix
import numpy, pylab, random, math
def main():
data = generateData()
pMatrix = pMatrixCreator(data)
q,h = qhVectorCreator(len(data))
gMatrix = gMatrixCreator(len(data))
#print(pMatrix[0][0])
r = qp(matrix(pMatrix), matrix(q), matrix(gMatrix), matrix(h))
alpha = list(r['x'])
svm = nonZeroAlphas(data, alpha)
res = indicator((1.25,1),svm)
#print(res)
plotDB(svm,data)
def plotDB(svm,data):
xrange=numpy.arange(-4,4,0.05)
yrange =numpy.arange(-4,4,0.05)
grid=matrix([[indicator((x,y),svm)
for y in yrange]
for x in xrange])
pylab.contour(xrange,yrange,grid,(-1.0,0.0,1.0),colors=('red','black','blue'),linewidths=(1,3,1))
pylab.show()
def nonZeroAlphas(data, alpha):
values = []
for i in range(len(alpha)):
if alpha[i] > 10**-5:
values.append((data[i][0] ,data[i][1], data[i][2], alpha[i]))
return values
def indicator(dataPoint, svm):
res = 0.0
for tup in svm:
res += tup[3]*tup[2]*polynomialKernel(dataPoint, tup[0:2])
return res
def linearKernel(vectorX, vectorY):
if(len(vectorX)!=len(vectorY)):
print("Vector length not equal.")
return 0.0
scalar = 0.0
for i in range(0,len(vectorX)):
scalar += vectorX[i]*vectorY[i]
return scalar+1.0
def polynomialKernel(vectorX, vectorY):
if(len(vectorX)!=len(vectorY)):
print("Vector length not equal.")
return 0
scalar = 0.0
for i in range(0,len(vectorX)):
scalar += vectorX[i]*vectorY[i]
return (scalar+1.0)**3
def radialKernel(vectorX, vectorY):
tempVec = (vectorX[0]-vectorY[0], vectorX[1]-vectorY[1])
scalar = 0.0
parameter=2
for i in range(0,len(tempVec)):
scalar += tempVec[i]*tempVec[i]
return math.exp(-scalar/(2*(parameter**2)))
def pMatrixCreator(dataSet):
n = len(dataSet)
pMatrix = [[0.0 for x in range(n)] for y in range(n)]
for i in range(n):
x = dataSet[i]
for j in range(n):
y = dataSet[j]
pMatrix[i][j] = x[2]*y[2]*polynomialKernel(x[:2] ,y[:2])
return pMatrix
def qhVectorCreator(n):
q = [-1.0]*n
h = [0.0]*n
return [q],[h]
def gMatrixCreator(n):
gMatrix = [[0.0 for x in range(n)] for y in range(n)]
for i in range(n):
gMatrix[i][i] = -1.0
return gMatrix
def generateData():
random.seed(100)
classA = [(random.normalvariate(-1.5 ,1),
random.normalvariate(0.5 ,1),
1.0)
for i in range(10)] + \
[(random.normalvariate(1.5 ,1),
random.normalvariate(0.5 ,1),
1.0)
for i in range(10)]
classB = [(random.normalvariate(0.0 ,0.5),
random.normalvariate(-0.5 ,0.5),
-1.0) for i in range(20)]
data = classA + classB
random.shuffle(data)
pylab.hold(True)
pylab.plot( [p[0] for p in classA],
[p[1] for p in classA],'bo')
pylab.plot( [p[0] for p in classB],
[p[1] for p in classB], 'ro')
#pylab.show()
return data
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,180 | py | 6 | main.py | 2 | 0.568868 | 0.533019 | 0 | 133 | 22.909774 | 101 |
bvillasen/simulation_analysis | 18,202,071,406,064 | 3f578dc7639b4f895514961105fcaeee856dbd2b | d768f07ed90c0274e2d9d935eaf5ecfe734a1f56 | /lya_statistics/compute_los_tau.py | 01d2d2cc7a7d751edefaeb7583a78f7d5f5cfe09 | []
| no_license | https://github.com/bvillasen/simulation_analysis | cfd0b5de865d2fb5992d828b2824079e6798774b | 645f0c397172ed30a713368942eec9ca68a9761a | refs/heads/master | 2023-06-02T19:06:39.851760 | 2021-06-25T18:40:58 | 2021-06-25T18:40:58 | 298,894,454 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, sys
import numpy as np
import h5py as h5
root_dir = os.path.dirname(os.getcwd()) + '/'
subDirectories = [x[0] for x in os.walk(root_dir)]
sys.path.extend(subDirectories)
from flux_power_spectrum import get_skewer_flux_power_spectrum
from load_skewers import load_skewers_multiple_axis
from spectra_functions import compute_optical_depth
from tools import *
use_mpi = True
if use_mpi :
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nprocs = comm.Get_size()
else:
rank = 0
nprocs = 1
print_out = False
if rank == 0: print_out = True
# uvb = 'hm12'
# uvb = 'pchw18'
# n_snap = 169
parameters = sys.argv
if print_out: print( parameters )
for option in parameters:
if option.find("n_snap") != -1: n_snap = int(option[option.find('=')+1:])
if option.find("uvb") != -1: uvb = option[option.find('=')+1:]
if print_out: print( f'Snapshot: {n_snap}' )
# dataDir = '/home/bruno/Desktop/ssd_0/data/'
# dataDir = '/raid/bruno/data/'
dataDir = '/data/groups/comp-astro/bruno/'
simulation_dir = dataDir + 'cosmo_sims/2048_hydro_50Mpc/'
input_dir = simulation_dir + 'skewers_{0}_HeII/'.format(uvb)
output_dir = simulation_dir + 'skewers_{0}_HeII/los_F/'.format(uvb)
if rank == 0: create_directory( output_dir )
# Box parameters
Lbox = 50000.0 #kpc/h
nPoints = 2048
nx = nPoints
ny = nPoints
nz = nPoints
ncells = nx * ny * nz
box = {'Lbox':[ Lbox, Lbox, Lbox ] }
# Cosmology parameters
cosmology = {}
cosmology['H0'] = 67.66
cosmology['Omega_M'] = 0.3111
cosmology['Omega_L'] = 0.6889
chem_type = 'HeII'
n_skewers_total = 6000
n_skewers_axis = n_skewers_total// 3
n_skewers_list = [ n_skewers_axis, n_skewers_axis, n_skewers_axis ]
axis_list = [ 'x', 'y', 'z' ]
n_skewers_proc_list = [ ]
for i in range( len(axis_list) ):
skewers_ids = range(n_skewers_list[i])
skewers_ids_proc = split_indices( skewers_ids, rank, nprocs, adjacent=True )
n_skewers_proc_list.append( len( skewers_ids_proc ))
if print_out: print(f"\nComputing LOS tau, s_nap:{n_snap} n_skewers:{n_skewers_total}" )
# Load skewer data
skewer_dataset = load_skewers_multiple_axis( axis_list, n_skewers_proc_list, n_snap, input_dir, load_HeII=True, set_random_seed=False, print_out=print_out)
current_z = skewer_dataset['current_z']
cosmology['current_z'] = current_z
los_density = skewer_dataset['density']
los_HI_density = skewer_dataset['HI_density']
los_HeII_density = skewer_dataset['HeII_density']
los_velocity = skewer_dataset['velocity']
los_temperature = skewer_dataset['temperature']
n_skewers = sum( n_skewers_proc_list )
skewers_ids = range(n_skewers)
processed_F = []
for i,skewer_id in enumerate(skewers_ids):
if i%(n_skewers//10)==0:
text = ' Skewer {0}/{1} {2:.0f} %'.format(i, n_skewers, float(i)/n_skewers*100)
# if rank == 0: print_line_flush( text )
if rank == 0: print( text )
skewer_data = {}
skewer_data['HI_density'] = los_HI_density[skewer_id]
skewer_data['temperature'] = los_temperature[skewer_id]
skewer_data['velocity'] = los_velocity[skewer_id]
skewer_data['HeII_density'] = los_HeII_density[skewer_id]
tau_los_data = compute_optical_depth( cosmology, box, skewer_data, space='redshift', method='error_function', chem_type=chem_type )
los_vel_hubble = tau_los_data['vel_Hubble']
los_tau = tau_los_data['tau']
los_F = np.exp( -los_tau )
processed_F.append( los_F )
processed_F = np.array( processed_F )
#Send the power spectrum to root process
if print_out: print( '\nGathering global data')
global_F = comm.gather( processed_F, root=0 )
if rank == 0:
global_F = np.concatenate( global_F )
n_processed = global_F.shape[0]
print( f'n_processed: {n_processed}, ps_data shape: {global_F.shape}' )
F_mean = global_F.mean()
tau = -np.log( F_mean )
print( f'z: {current_z:.2} F_mean: {F_mean:.2e} tau: {tau:.2e}')
file_name = output_dir + f'los_transmitted_flux_{n_snap}_{chem_type}.h5'
file = h5.File( file_name, 'w')
file.attrs['n_skewers'] = n_processed
file.attrs['current_z'] = current_z
file.attrs['F_mean'] = F_mean
file.create_dataset( 'los_F', data=global_F )
file.create_dataset( 'vel_Hubble', data=los_vel_hubble )
file.close()
print( f'Saved File: {file_name}')
| UTF-8 | Python | false | false | 4,237 | py | 178 | compute_los_tau.py | 159 | 0.668633 | 0.650224 | 0 | 140 | 29.242857 | 155 |
dpmanoj/Online-IPL-Ticket-Booking | 18,580,028,559,316 | a90fdb8d64073deb0d4f3f62cff83b24cf2d81ca | 6fef90be96ea4fe51076bb63d1b23d1b06192113 | /booking/migrations/0003_auto_20200430_1355.py | 752a0703f87c9a29d3a0f61c1acb4624e3a6d723 | []
| no_license | https://github.com/dpmanoj/Online-IPL-Ticket-Booking | 911df573fc957bce3384645fad8b4623784213d5 | c449a4155a69bc9850eb72244c7764a6a55b63d6 | refs/heads/main | 2022-12-29T17:55:14.102764 | 2020-10-21T01:56:05 | 2020-10-21T01:56:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.4 on 2020-04-30 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('booking', '0002_booking_tickets_match_date'),
]
operations = [
migrations.AlterField(
model_name='booking_tickets',
name='match_date',
field=models.DateField(default='2020/12/20'),
),
]
| UTF-8 | Python | false | false | 414 | py | 43 | 0003_auto_20200430_1355.py | 33 | 0.599034 | 0.533816 | 0 | 18 | 22 | 57 |
ellmetha/django-precise-bbcode | 11,544,872,105,873 | 58c1d50482bd4ceff2380654a146d6c7476adab9 | 408f985c954ad6f0f3926e02dc18c31336bac194 | /precise_bbcode/core/utils.py | 8388589936970347e3e3e1dbc97d1e1a7f325542 | [
"BSD-3-Clause"
]
| permissive | https://github.com/ellmetha/django-precise-bbcode | 176258d9436cc29002d59f29d4964a3bdd05721e | 24306622fc8ebd91c8c79543c18050de0b32f1f1 | refs/heads/main | 2023-08-28T13:35:07.027756 | 2023-07-27T01:07:15 | 2023-07-27T01:07:15 | 13,904,807 | 36 | 16 | BSD-3-Clause | false | 2023-08-19T18:44:47 | 2013-10-27T16:45:03 | 2023-04-23T16:43:21 | 2023-08-19T18:44:46 | 715 | 31 | 10 | 17 | Python | false | false | from functools import reduce
def replace(data, replacements):
"""
Performs several string substitutions on the initial ``data`` string using
a list of 2-tuples (old, new) defining substitutions and returns the resulting
string.
"""
return reduce(lambda a, kv: a.replace(*kv), replacements, data)
| UTF-8 | Python | false | false | 322 | py | 63 | utils.py | 43 | 0.708075 | 0.704969 | 0 | 10 | 31.2 | 82 |
webmania-ma/webmania_projects | 6,725,918,815,559 | f070f5d3d61c0ae01d73cba1b8fa3a70e08b10ce | b2be278feb189924735880f5f539da1b2204510a | /avad_repos/avad_addons_v10/avad_ice_ip/models/company.py | df89dee39806e4f9a1a6628952dd461b768b4c79 | []
| no_license | https://github.com/webmania-ma/webmania_projects | b836c097506acada4600ce365aba41d8d24aac9c | 07dc8eb41c2900188e539282cc092491e760d201 | refs/heads/main | 2022-05-21T17:36:38.718358 | 2022-03-27T14:55:49 | 2022-03-27T14:55:49 | 277,064,743 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding: utf-8
from odoo import models, api, fields, _
from odoo.exceptions import Warning
import odoo.addons.decimal_precision as dp
class ResCompany(models.Model):
_inherit = 'res.company'
_description = 'Company'
ice = fields.Char(string='ICE', size=15,)
identifiant_tp = fields.Char(string=u'Identifiant TP', size=64,)
| UTF-8 | Python | false | false | 347 | py | 72 | company.py | 33 | 0.708934 | 0.694524 | 0 | 14 | 23.785714 | 68 |
dantordj/SemanticClustering | 1,279,900,292,915 | b07800358c04b9c0f50f5dd616dd064e1e8e7df8 | f8d601f7e3988d49d27c58fe423f1a189e832857 | /create_csv.py | 6bff1380baf6f4311d66b6c75f94626d68c75012 | []
| no_license | https://github.com/dantordj/SemanticClustering | 53cb55370ed775ab6afcd5595a6aa352a798772e | fd9148057052ebd424f251a59f21689aafa966d6 | refs/heads/master | 2020-03-31T16:49:08.612478 | 2018-10-19T16:33:50 | 2018-10-19T16:33:50 | 152,393,288 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import os
import string
from nltk.corpus import stopwords
import re
punct = string.punctuation.replace('-', '')
def clean(text):
clean_text = text.replace('"', '\"')
clean_text = clean_text.replace("``", "")
clean_text = clean_text.replace("/.", "")
clean_text = clean_text.replace("''", "")
clean_r = re.compile('<.*?>')
clean_re = re.compile('/.*? ')
clean_tab = re.compile(r'[\n\r\t]')
printable = set(string.printable)
clean_text = re.sub(clean_r, '', clean_text)
clean_text = re.sub(clean_re, ' ', clean_text)
clean_text = re.sub(clean_tab, ' ', clean_text)
clean_space = re.compile('[ ][ ]*')
clean_text = re.sub(clean_space, ' ', clean_text)
for sign in punct:
clean_text = clean_text.replace(" " + sign, sign)
    return ''.join(filter(lambda x: x in printable, clean_text)).strip()
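
# Rough illustration of what clean() does to a Brown-corpus style line
# (made-up sample): "The/at jury/nn said/vbd ." becomes "The jury said."
# once the part-of-speech tags, markup, extra whitespace and the space before
# punctuation have been stripped.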
def create_init_df():
nct = string.punctuation.replace('-', '')
values = {"text": [], "real_cluster":[]}
i = 0
cluster_to_int = {}
count_clusters = 0
for element in os.listdir('brown'):
if element == '.DS_Store':
continue
cluster_str = element[0:2]
try:
cluster = cluster_to_int[cluster_str]
except KeyError:
cluster_to_int[cluster_str] = count_clusters
count_clusters += 1
cluster = cluster_to_int[cluster_str]
with open('brown/' + element) as fil:
text = fil.read()
# clean text
text = clean(text)
i += 1
values["text"] += [text]
values["real_cluster"] += [cluster]
df = pd.DataFrame(values)
print("Num clusters", count_clusters)
print(cluster_to_int)
df.to_csv("df_brown.csv")
return df
def create_init_df_sentence():
values = {"text": [], "real_cluster":[]}
i = 0
cluster_to_int = {}
count_clusters = 0
for element in os.listdir('brown'):
if element == '.DS_Store':
continue
cluster_str = element[0:2]
try:
cluster = cluster_to_int[cluster_str]
except KeyError:
cluster_to_int[cluster_str] = count_clusters
count_clusters += 1
cluster = cluster_to_int[cluster_str]
with open('brown/' + element) as fil:
text = fil.read()
text = clean(text)
text = text.split(". ")
# clean text
for sentence in text:
i += 1
values["text"] += [sentence]
values["real_cluster"] += [cluster]
df = pd.DataFrame(values)
df.to_csv("df_brown_sentence.csv")
return df
| UTF-8 | Python | false | false | 2,681 | py | 10 | create_csv.py | 9 | 0.538232 | 0.533756 | 0 | 87 | 29.770115 | 63 |
aserg24/course_work | 8,615,704,417,034 | a7fd23d839dc899007479e331b6e3c2c8f0dc059 | 101f6c22f052ef417c339a226bbf4c36c5b0ba4d | /spb_today.py | b8a23a9f9b0445399232e8fbee7bde8193a73b82 | []
| no_license | https://github.com/aserg24/course_work | 3a527e46e04698c716c423e22f533f971df01c2c | d65c14da986d038a929970b0769a6a4ed7d821e2 | refs/heads/master | 2021-01-20T16:00:31.300730 | 2017-05-10T01:54:15 | 2017-05-10T01:54:15 | 90,809,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime as dt
import vk
from tqdm import tqdm
from sqlalchemy import func, create_engine
from sqlalchemy import Column, Integer, Text, DateTime
from sqlalchemy.schema import Index
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Post(Base):
__tablename__ = 'post'
__table_args__ = (Index('post_id_idx', 'post_id'), )
id = Column(Integer, primary_key=True)
text = Column(Text, nullable=False)
author_id = Column(Integer, nullable=False)
post_id = Column(Integer, nullable=False)
date = Column(DateTime, nullable=False)
last_offset = Column(Integer, nullable=False)
def init_db(db_url):
engine = create_engine(db_url)
Base.metadata.bind = engine
Base.metadata.create_all()
return sessionmaker(bind=engine)
def validate_post(post_response):
return (('from_id' in post_response) and
(post_response['marked_as_ads'] == 0) and
(post_response['post_type'] == 'post'))
if __name__ == '__main__':
db_session = init_db('sqlite:///test.db')()
vk_session = vk.Session()
api = vk.API(vk_session)
if db_session.query(Post).count() == 0:
last_offset = 0
else:
last_offset = db_session.query(func.max(Post.last_offset)).first()[0]
total_posts = api.wall.get(domain='spb_today', count=0)[0]
with tqdm(total=total_posts) as progress:
progress.update(last_offset)
while last_offset < total_posts:
offset = total_posts - last_offset - 100
response = api.wall.get(domain='spb_today', offset=max(offset, 0),
count=100)
posts = [Post(text=x['text'],
author_id=x['from_id'],
post_id=x['id'],
date=dt.datetime.fromtimestamp(x['date']),
last_offset=last_offset + (len(response) - 1 - i))
for i, x in enumerate(response[1:]) if validate_post(x)]
db_session.add_all(posts)
db_session.commit()
total_posts = response[0]
last_offset = db_session.query(
func.max(Post.last_offset)).first()[0]
progress.update(len(posts))
print(db_session.query(Post)[db_session.query(Post).count() - 1].text)
    print(db_session.query(Post)[db_session.query(Post).count() - 1].author_id)
#for post in db_session.query(Post):
# print(post.date)
| UTF-8 | Python | false | false | 2,559 | py | 3 | spb_today.py | 2 | 0.599062 | 0.591247 | 0 | 84 | 29.464286 | 78 |
AlexLan73/NW_052 | 11,458,972,750,799 | 0979d33727d2c348776ec0de8e4088ef2dcf1328 | dda92c918e183c376fee172c88d9eb2d8b1c386f | /NW_052/main.py | 7d19413edcbd2d13479ffbea7d29467d904be155 | []
| no_license | https://github.com/AlexLan73/NW_052 | dbcf8a49582bcd591488d197a763f39695865bdc | a38fe65083e882861b3ff333fa6661bce9c6bf07 | refs/heads/master | 2020-12-14T10:10:19.260278 | 2020-01-18T08:37:08 | 2020-01-18T08:37:08 | 234,705,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ParsingDan as pdan
import NWModel as nwm
def _form_dan(dan, category, param):
maxWordsCount, xLen, step, type_dan, type_dan0 = param[0], param[1], param[2], param[3], param[4]
    par= pdan.ParsingDan(dan, category)     # standard
xtrain, xtest= par.sprlit_train_test()
if type_dan == 1:
xtrain, xtest= par.normalization_test_dan(xtrain, xtest)
form_dan= nwm.FormDanToAI0(xtrain, xtest, category, maxWordsCount)
trein_text, test_text, tokenizer = form_dan.train_test_text() # maxWordsCount=100
    #Build the training and test sets
    xTrain, yTrain = form_dan.createSetsMultiClasses(form_dan.trainWordIndexes, xLen, step) #extract the training set
    xTest, yTest = form_dan.createSetsMultiClasses(form_dan.testWordIndexes, xLen, step)    #extract the test set
if type_dan0 == 1:
return xTrain, yTrain, xTest, yTest, category
    #Convert the resulting sets from sequences of indexes into matrices of zeros and ones (Bag of Words)
    xTrain01 = tokenizer.sequences_to_matrix(xTrain.tolist()) # pass xTrain as a list so the method works correctly
    xTest01 = tokenizer.sequences_to_matrix(xTest.tolist()) # pass xTest as a list so the method works correctly
return xTrain01, yTrain, xTest01, yTest, category
if __name__ == "__main__":
# path_treatment="E:/Python/NW_05/Болезни"
path_treatment="O:/Python/NW/NW_052/Болезни"
p_dan = pdan.ReadDanDisk("os", d=dict(dir=path_treatment))
dan, category = p_dan.read_dir()
#param= [100, 50, 10, 0, 0]
# maxWordsCount, xLen, step,
    # 0/1 unbalanced/balanced data,
    # 0/1 data in "sausage" (flattened) format / Embedding format
param=[110, 50, 10, 1, 0] #[110, 50, 10, 0]
nwm_=nwm.NWModel(_form_dan(dan, category, param))
nwm_.model_000()
    # with balanced input data
# param=[110, 50, 10, 1, 0]
# nwm_=nwm.NWModel(_form_dan(dan, category, param))
# nwm_.model02_01_balans()
    # Test with Embedding, balanced data
# param=[100, 50, 10, 1, 1]
# nwm_=nwm.NWModel(_form_dan(dan, category, param))
# nwm_.model_Embedding_0(100, 50, 30)
| UTF-8 | Python | false | false | 2,607 | py | 14 | main.py | 3 | 0.670424 | 0.630298 | 0 | 48 | 45.208333 | 142 |
nakamotohideyoshi/job-pitch | 10,995,116,316,203 | 941f8ba7d1f298bda62ebfd7d0615833833c0a0f | 452901ad0d4129228468ffe1c1a933ccd9481d25 | /web/src/mjp/migrations/0011_auto_20150618_2043.py | 643829126bcea013261de3d3abf53545994fec88 | []
| no_license | https://github.com/nakamotohideyoshi/job-pitch | d409e49403346e4162f967a5d1bcf5edbb453063 | 5ed7ab4cf74b504d68190c678ca5d17fe4e92c19 | refs/heads/master | 2022-05-19T00:16:10.521704 | 2018-12-17T18:02:30 | 2018-12-17T18:02:30 | 224,891,428 | 0 | 0 | null | false | 2022-04-09T04:28:59 | 2019-11-29T16:45:31 | 2019-12-06T09:22:33 | 2022-04-09T04:28:58 | 187,127 | 0 | 0 | 20 | Java | false | false | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mjp', '0010_message_created'),
]
operations = [
migrations.CreateModel(
name='Pitch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('video', models.FileField(max_length=255, upload_to=b'pitch/%Y/%m/%d')),
('thumbnail', models.ImageField(max_length=255, upload_to=b'pitch/%Y/%m/%d')),
],
),
migrations.AlterModelOptions(
name='message',
options={'ordering': ('created',)},
),
migrations.AddField(
model_name='jobseeker',
name='pitch',
field=models.ForeignKey(related_name='job_seeker', to='mjp.Pitch', null=True),
),
]
| UTF-8 | Python | false | false | 965 | py | 582 | 0011_auto_20150618_2043.py | 501 | 0.546114 | 0.534715 | 0 | 31 | 30.129032 | 114 |
Vikrant3/finance_peer_demo_vikrant | 764,504,189,036 | 36b7d3d7527233122c72b17ce0dbb78380427edf | 58702cc964bf1f612a1b2a1b75802e1dd3facff0 | /scripts_to_save.py | 751daa953e0f7cfbe105258a3979849641d5fd25 | []
| no_license | https://github.com/Vikrant3/finance_peer_demo_vikrant | 689420a3d23cbd610692180cb566cb3b4805840d | 24b25d7324944d3c7a599f3f56e0644c54e59103 | refs/heads/main | 2023-07-12T17:58:13.531051 | 2021-08-21T13:39:20 | 2021-08-21T13:39:20 | 398,563,290 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from demo_app.models import FinancePeerUserData
from demo_app.fp_data import data
list_to_save = []
def save_finace_peer_data():
for d in data:
fpd = FinancePeerUserData(user_id=d['userId'], item_id=d['id'], title=d['title'], body=d['body'])
list_to_save.append(fpd)
FinancePeerUserData.objects.bulk_create(list_to_save, batch_size=500)
| UTF-8 | Python | false | false | 363 | py | 6 | scripts_to_save.py | 4 | 0.69146 | 0.683196 | 0 | 10 | 35.3 | 105 |
yangchun222001/algorithm_python | 8,031,588,887,184 | a66f50e06ecbaa0b6c788eb3e01d54e886b868c5 | 52beb782b5233620816c11da742837c4812679f4 | /construct_binary_tree_from_inorder_and_postorder_traversal.py | 77bdcf6058f290ff3b454fd7416263efa6c7eb8c | []
| no_license | https://github.com/yangchun222001/algorithm_python | bce51c2caf82da03a77e7fd85fce7c4e872b8303 | 8637653c937df9b7ce52d10b712c4d31858a8f5c | refs/heads/master | 2021-01-18T18:54:55.581204 | 2017-04-01T02:42:42 | 2017-04-01T02:42:42 | 86,876,577 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Given inorder and postorder traversal of a tree, construct the binary tree.
Notice
You may assume that duplicates do not exist in the tree.
Example
Given inorder [1,2,3] and postorder [1,3,2], return a tree:
2
/ \
1 3
'''
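# Worked trace of the example above (illustrative only): the last postorder
# element, 2, is the root; its index in inorder [1,2,3] is 1, so inorder[:1]
# = [1] rebuilds the left subtree and inorder[2:] = [3] the right subtree,
# with the same split applied to the remaining postorder list on each
# recursive call.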
#Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param inorder : A list of integers that inorder traversal of a tree
@param postorder : A list of integers that postorder traversal of a tree
@return : Root of a tree
"""
def buildTree(self, inorder, postorder):
# write your code here
if postorder is None or inorder is None:
return None
if len(postorder) == 0 or len(inorder) == 0:
return None
val = postorder.pop()
index = inorder.index(val)
left_inorder = inorder[:index]
right_inorder = inorder[index + 1:]
left_postorder = postorder[:index]
right_postorder = postorder[index:]
node = TreeNode(val)
left = self.buildTree(left_inorder, left_postorder)
right = self.buildTree(right_inorder, right_postorder)
node.left = left
node.right = right
return node | UTF-8 | Python | false | false | 1,276 | py | 45 | construct_binary_tree_from_inorder_and_postorder_traversal.py | 44 | 0.619906 | 0.610502 | 0 | 52 | 23.557692 | 76 |
cagostino/cloudyMCMC | 1,271,310,328,491 | 02efa31eed799fe6bc0b64c0f6a4fcb4047cb317 | 7513dc9de3b2b792b7a6ce86ecc2059114496ac1 | /mcmc_tests.py | 9e9368fcc0ab4d1878df7baf8fde525d96d690b4 | []
| no_license | https://github.com/cagostino/cloudyMCMC | 501798ba667dca3cbaa9f0110b268c0371b08d5f | 522fe922552d1dc247f3973632994c117331d321 | refs/heads/master | 2020-10-01T23:35:02.822718 | 2019-12-12T16:26:55 | 2019-12-12T16:26:55 | 227,647,723 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from MCMC import MCMC
from scipy.stats import sem
import pyCloudy as pc
from PCloudy import PCloudy
plt.rc('font',family='serif')
plt.rc('text',usetex=True)
def poly(x, pars):
return np.sum(np.array([ pars[i]*x**(len(pars)-1- i) for i in range(len(pars))]), axis=0)
def polyperiodic(x, pars):
return pars[0]*np.cos(pars[1]*x)+np.sum(np.array([ pars[i]*x**(len(pars)-1- i) for i in range(2,len(pars))]), axis=0)
def per(x, pars):
return pars[0]*np.sin(x*pars[1])
'''Metropolis Hastings'''
x_vals = np.linspace(-15,15, 400)
period = [1.1,0.7 ]
linperiod = [2, 4, 2, 2.2]
linpars = [3.7, 2.2]
quadpars = [-1.1,2.3,-3.5]
cubicpars = [-2.1, 2.3, 4.5, -10]
quarticpars = [4.2,1.1, -4.3, -2.1, 14]
quinticpars = [-0.8, -6.2, 1.4, 3.2, 2.1, 4.8]
linper_true = polyperiodic(x_vals, linperiod)
per_true = per(x_vals, period)
lin_true = poly(x_vals, linpars)
quad_true = poly(x_vals,quadpars)
cubic_true = poly(x_vals,cubicpars)
quartic_true = poly(x_vals,quarticpars)
quintic_true = poly(x_vals,quinticpars)
linper_comp = linper_true+np.random.normal(scale=0.1,size=len(linper_true))
per_comp = per_true+np.random.normal(scale=0.2,size=len(per_true))
lin_comp = lin_true + np.random.normal(size=len(lin_true))
quad_comp = quad_true+np.random.normal(size=len(x_vals))
cubic_comp = cubic_true+np.random.normal(size=len(x_vals))
quartic_comp = quartic_true+np.random.normal(size=len(x_vals))
quintic_comp = quintic_true+np.random.normal(size=len(x_vals))
def test_n_chains(plot_n=False, plot_chi=False):
n_chain_arr= np.int64(10**(np.arange(2,5.5,0.5)))
runtimes = []
runs = []
for n_chain in n_chain_arr:
lm = MCMC(poly, x_vals, lin_comp, [2,2],[[-10,10],[-30,30]],n_iter=100, n_chains = n_chain )
lm.run_mcmc()
runs.append(lm)
runtimes.append(lm.time_total)
runtimes = np.array(runtimes)
bs = []
ms = []
chi_min = []
for mc in runs:
finpars = mc.finalpars
bs.append([finpars[1]])
ms.append([finpars[0]])
chi_min.append(np.min(mc.all_probs))
ms = np.array(ms)
mdiffs = np.abs(ms - linpars[0])
bs = np.array(bs)
bdiffs = np.abs(bs - linpars[1])
chi_min =np.array(chi_min)
if plot_chi:
plt.scatter(np.log10(n_chain_arr), ms, color='k' )
plt.axhline(y=linpars[0],label='True m')
plt.xlabel('log(N)')
plt.ylabel('m-value')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
plt.scatter(np.log10(n_chain_arr), bs, color='k' )
plt.axhline(y=linpars[1],label='True b')
plt.xlabel('log(N)')
plt.ylabel('b-value')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
plt.scatter(np.log10(n_chain_arr), -np.log10(-chi_min), color='k' )
plt.xlabel('log(N)')
plt.ylabel(r'log($\chi^2$ min)')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
if plot_n:
plt.scatter(np.log10(n_chain_arr), np.log10(runtimes), color='k' )
m, b = np.polyfit(np.log10(n_chain_arr), np.log10(runtimes), 1)
plt.plot(np.log10(n_chain_arr),m*np.log10(n_chain_arr)+b,'k-.', label='m = '+str(m)[0:5])
plt.xlabel('log(N)')
plt.ylabel('log(Run-time) [s]')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
def test_n_iter(plot_n=False, plot_chi=False):
n_iter_arr= np.int64(10**(np.arange(1,4.5,0.5)))
runtimes = []
runs = []
for n_iter in n_iter_arr:
lm = MCMC(poly, x_vals, lin_comp, [2,2],[[-10,10],[-30,30]],n_iter=n_iter, n_chains = 1000 )
lm.run_mcmc()
runs.append(lm)
runtimes.append(lm.time_total)
runtimes = np.array(runtimes)
bs = []
ms = []
chi_min = []
for mc in runs:
finpars = mc.finalpars
bs.append([finpars[1]])
ms.append([finpars[0]])
chi_min.append(np.min(mc.all_probs))
ms = np.array(ms)
mdiffs = np.abs(ms - linpars[0])
bs = np.array(bs)
bdiffs = np.abs(bs - linpars[1])
chi_min =np.array(chi_min)
if plot_chi:
plt.scatter(np.log10(n_iter_arr), ms, color='k' )
plt.axhline(y=linpars[0],label='True m')
plt.xlabel('log(N)')
plt.ylabel('m-value')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
plt.scatter(np.log10(n_iter_arr), bs, color='k' )
plt.axhline(y=linpars[1],label='True b')
plt.xlabel('log(N)')
plt.ylabel('b-value')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
plt.scatter(np.log10(n_iter_arr), -np.log10(-chi_min), color='k' )
plt.xlabel('log(N)')
plt.ylabel(r'log($\chi^2$ min)')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
if plot_n:
plt.scatter(np.log10(n_iter_arr), np.log10(runtimes), color='k' )
m, b = np.polyfit(np.log10(n_iter_arr), np.log10(runtimes), 1)
plt.plot(np.log10(n_iter_arr),m*np.log10(n_iter_arr)+b,'k-.', label='m = '+str(m)[0:5])
plt.xlabel('log(N)')
plt.ylabel('log(Run-time) [s]')
plt.legend(frameon=False, fontsize=20)
plt.tight_layout()
def test_mods():
n_iter=100
n_chains=10000
linperlims = [[1.,3.],[3,5],[1.,3.],[1.,3.]]
linpermcmc= MCMC(polyperiodic,x_vals, linper_comp, [2.5,3,2,2], linperlims,n_iter=n_iter, n_chains=n_chains)
linpermcmc.run_mcmc()
perlims = [[1.0,1.3],[0.6,0.9]]
permcmc= MCMC(per, x_vals, per_comp, [1,1], perlims, n_iter=n_iter, n_chains=n_chains)
permcmc.run_mcmc()
linlims = [[-10,10],[-30,30]]
linmcmc = MCMC(poly,x_vals, lin_comp, [2,2],linlims, n_iter=n_iter, n_chains=n_chains)
linmcmc.run_mcmc()
quadlims = [[-10,10],[-10,10],[-30,30]]
quadmcmc = MCMC(poly, x_vals, quad_comp, [1,1,1], quadlims, n_iter=n_iter, n_chains=n_chains)
quadmcmc.run_mcmc()
cubiclims = [[-10,10],[-10,10],[-10,10],[-30,30]]
cubicmcmc = MCMC(poly, x_vals, cubic_comp, [1,1,1,1], cubiclims, n_iter=n_iter, n_chains=n_chains)
cubicmcmc.run_mcmc()
quarticlims = [[-10,10],[-10,10],[-10,10],[-10,10],[-30,30]]
quarticmcmc = MCMC(poly, x_vals, quartic_comp, [1,1,1,1,1], quarticlims, n_iter=n_iter, n_chains=n_chains)
quarticmcmc.run_mcmc()
quinticlims = [[-10,10],[-10,10],[-10,10],[-10,10],[-10,10],[-30,30]]
quinticmcmc = MCMC(poly, x_vals, quintic_comp, [1,1,1,1,1,1], quinticlims, n_iter=n_iter, n_chains=n_chains)
quinticmcmc.run_mcmc()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.tight_layout()
def test_cloudy():
dens = 2 #log cm^-3
Teff = 45000. #K
qH = 47. #s-1, ionizing phot /second
r_min = 5e17 #cm,
dist = 1.26 #kpc, how far is the region from us
cloudypars = np.array([dens, Teff, qH, r_min, dist])
pclo = PCloudy(cloudypars)
pclo.cloudy()
Mod = pclo.Mod
wl = Mod.get_cont_x(unit='Ang')
intens = Mod.get_cont_y(cont='ntrans',unit='Jy')
opt=np.where((wl>4000) & (wl<8000))[0]
wl_opt = wl[opt]
intens_opt = intens[opt]
initpars = cloudypars + np.random.normal(size=5)
cloudylims = [[1,3],[40000,50000],[46,49],[1e17,1e18],[1,2]]
cloudyMCMC = MCMC(cloudy_model, wl_opt, intens_opt, initpars, cloudylims, n_iter=10, n_chains=100, sample_rate=1)
cloudyMCMC.run_mcmc()
def cloudy_model(wl_ran, pars):
pclo = PCloudy(pars)
pclo.cloudy()
Mod = pclo.Mod
wl = Mod.get_cont_x(unit='Ang')
intens = Mod.get_cont_y(cont='ntrans',unit='Jy')
opt=np.where((wl>4000) & (wl<8000))[0]
intens_opt = intens[opt]
return intens_opt | UTF-8 | Python | false | false | 7,813 | py | 4 | mcmc_tests.py | 3 | 0.580187 | 0.530782 | 0 | 230 | 32.973913 | 121 |
dagomankle/IGN-Project | 14,800,457,352,254 | b239b6904280035ecc9e60e47326a4696c41c6ed | 69711058c42bd9de890a62d7c7f9e644257f22a6 | /ViejoBaul/Partitioner.py | c92fd25d742efe738b26e5a0507b2c8d5be5605c | []
| no_license | https://github.com/dagomankle/IGN-Project | e0ca67d2b1db6d463c71dfa7ad492fc98104eae0 | 43de1f09a90b5b9481494c314f2c821a11ef1607 | refs/heads/master | 2020-03-12T17:47:47.222669 | 2019-06-22T12:16:41 | 2019-06-22T12:16:41 | 130,744,866 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from obspy.core import read
from obspy.core import UTCDateTime
from obspy.signal.trigger import plot_trigger# required imports (all except read, depending on usage)
from obspy.clients.arclink import Client# to create the connection to the database
thr_on = 1.1
thr_off = 0.99
tiempoSismo = 360
tiempoAntes = tiempoSismo/2
tiempoDespues = tiempoSismo/2
clienteArclink = Client('test', '192.168.1.7', 18001)# connection to the data stream
#from obspy.signal.trigger import classic_sta_lta # if the classic version is wanted
from obspy.signal.trigger import recursive_sta_lta# if the recursive version is wanted, etc.
t = UTCDateTime('2017-01-01 05:00:00') # +5 hours for UTC; set the desired date here.
trace5 = clienteArclink.get_waveforms('EC', 'CAYR', '', 'SHZ',t,t+3600*2,route = False, compressed = False)[0] # desired additional time in seconds
puntoAlto = 0
for x in range(0, trace5.count()):
if(trace5[x] == trace5.max()):
puntoAlto = x
trace5 = trace5.slice(t+0.01*puntoAlto -tiempoAntes, t+0.01*puntoAlto+tiempoDespues)
df5 = trace5.stats.sampling_rate
cft5 = recursive_sta_lta(trace5.data, int(2.5*df5), int(5*df5)) # defines the window sizes
plot_trigger(trace5,cft5,thr_on,thr_off) # defines the variation to flag
# mseed: 100 samples per second; each array position represents one data point taken every 1/100 of a second | UTF-8 | Python | false | false | 1,320 | py | 33 | Partitioner.py | 19 | 0.756061 | 0.698485 | 0 | 33 | 39.030303 | 150 |
skar2/Disease-Information-Website | 5,076,651,356,365 | 4a0928eb96a80492299917e120362e5ea22c2715 | d13f42768adf17da68938aee295793bf43748948 | /DIPROJECT/home/migrations/0013_info_img.py | da48654b3f88dc88b6a26bf37f1b3aa6cdac7aa9 | []
| no_license | https://github.com/skar2/Disease-Information-Website | da2ebaa7029256e3f2ff39bb016156e499c8416d | 42badbf05ddee3c12665d31589eae0650e2db020 | refs/heads/master | 2023-06-20T12:28:38.104712 | 2021-07-20T13:20:42 | 2021-07-20T13:20:42 | 328,928,013 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.4 on 2020-12-24 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0012_auto_20201223_1949'),
]
operations = [
migrations.AddField(
model_name='info',
name='img',
field=models.ImageField(default='', upload_to=''),
),
]
| UTF-8 | Python | false | false | 388 | py | 28 | 0013_info_img.py | 17 | 0.569588 | 0.489691 | 0 | 18 | 20.555556 | 62 |
Blackbird-io/mechanics-prod | 11,235,634,493,401 | bbc46e946324d0ba306dfb75a9bd335aef54d962 | fbbd431467b46cf2ce8003e14a6c6190bea5ba82 | /data_structures/portal/__init__.py | 1de785a7e2aed52f3b820935cb7f94512b2867b9 | []
| no_license | https://github.com/Blackbird-io/mechanics-prod | 187412dae4314e33990b2c5e83c019aafd8a1594 | a5fa9b603c76928751e17cf9b6be3ea795107000 | refs/heads/master | 2023-03-03T17:49:55.032442 | 2017-09-22T19:54:59 | 2017-09-22T19:54:59 | 337,511,595 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # PROPRIETARY AND CONFIDENTIAL
# Property of Blackbird Logical Applications, LLC
# Copyright Blackbird Logical Applications, LLC 2016
# NOT TO BE CIRCULATED OR REPRODUCED WITHOUT PRIOR WRITTEN APPROVAL
# Blackbird Environment
# Module: DataStructures.Analysis.__init__
"""
Package that contains objects that Blackbird uses to analyze models.
==================== ==========================================================
Attribute Description
==================== ==========================================================
DATA:
n/a
FUNCTIONS:
n/a
CLASSES:
n/a
==================== ==========================================================
"""
#imports
#leave blank intentionally
| UTF-8 | Python | false | false | 736 | py | 127 | __init__.py | 123 | 0.47962 | 0.474185 | 0 | 30 | 22.533333 | 80 |
shalevy1/json2tree | 16,286,516,025,814 | dbb6971b3e0910b14d1506fc7d5f03012441d21d | 165c0958cbe9c127357082a7c0ea192b4ec4610a | /json2tree/html.py | 89fa994419adc74764f64946e1671f3fd774daf1 | [
"MIT"
]
| permissive | https://github.com/shalevy1/json2tree | 58be5d47571ef317bce4d628fa77fb89ef7a7c39 | ff7fa54bc06de4b341a8a885dc30cb6a713359a1 | refs/heads/main | 2023-03-11T02:49:27.196591 | 2021-02-26T04:34:40 | 2021-02-26T04:34:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | head = """
<!DOCTYPE html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="https://fonts.googleapis.com/css2?family=Inconsolata:wght@300&family=Oswald&display=swap"
rel="stylesheet"><link href="https://fonts.googleapis.com/css2?family=Josefin+Sans:ital,wght@1,300&display=swap"
rel="stylesheet">
<link href="https://fonts.googleapis.com/css2?family=Inconsolata&display=swap" rel="stylesheet">
%s \n
</head>
<body>
<div class="header">
<img src="https://raw.githubusercontent.com/tern-tools/tern/master/docs/img/tern_logo.png" height="60px">\n
<br>
</div>\n
<div style="font-family: \'Inconsolata\', monospace;">
<p>
Tern at %s
<p>
The following report was generated for "%s" image.
</div>
"""
import logging

# Module-level logger used by create_html_report() below.
logger = logging.getLogger(__name__)


def list_handler(list_obj, indent):
'''Write html code for lists in report dictionary'''
html_string = ''
for i, _ in enumerate(list_obj):
if isinstance(list_obj[i], dict):
if "name" in list_obj[i].keys():
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(list_obj[i]["name"]) + \
' : ' + '</span> \n '
else:
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(i) + ' : ' + '</span> \n '
html_string = html_string + dict_handler(list_obj[i], indent+1)
html_string = html_string + ' '*indent + '</li> \n '
elif isinstance(list_obj[i], list):
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(i) + ' : ' + '</span> \n '
html_string = html_string + ' '*indent + \
'<ul class ="nested"> \n '
html_string = html_string + list_handler(list_obj[i], indent+1)
html_string = html_string + ' '*indent + '</ul> \n ' + \
' '*indent + '</li>\n '
else:
html_string = html_string + ' '*indent + '<li>' + \
'<span class="text-c">' + str(list_obj[i]) + \
'</span>\n</li> \n '
return html_string
# pylint: disable=too-many-branches
def dict_handler(dict_obj, indent):
'''Writes html code for dictionary in report dictionary'''
html_string = ''
html_string = html_string + ' '*indent + '<ul class ="nested"> \n'
for k, v in dict_obj.items():
if isinstance(v, dict):
if "name" in v.keys():
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(v["name"]) + ' : ' + \
'</span> \n '
else:
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(k) + ' : ' + '</span> \n '
html_string = html_string + dict_handler(v, indent+1) + \
' '*indent + '</li> \n '
elif isinstance(v, list):
html_string = html_string + ' '*indent + \
'<li><span class="caret">' + str(k) + ' : ' + \
'[%d]' % (len(v)) + '</span> \n '
html_string = html_string + ' '*indent + \
'<ul class ="nested"> \n ' + list_handler(v, indent+1) + \
' '*indent + '</ul> \n ' + ' '*indent + '</li> \n '
else:
html_string = html_string + ' '*indent + \
'<li><span class="text-h">' + str(k) + ' : ' + \
'</span><span class="text-c">' + str(v) + '</span></li>\n'
html_string = html_string + ' '*indent + '</ul> \n '
return html_string
def report_dict_to_html(dict_obj):
'''Writes html code for report'''
html_string = ''
html_string = html_string + '<ul class ="myUL"> \n'
html_string = html_string + \
'<li><span class="caret">REPORT DETAILS</span> \n'
html_string = html_string + dict_handler(dict_obj, 0)
html_string = html_string + '</li></ul> \n'
return html_string
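
# Illustrative call (made-up input, not part of the original module):
#   report_dict_to_html({'images': [{'image': {'name': 'alpine', 'tag': '3.12'}}]})
# returns the nested <ul>/<li> markup that the page script is meant to turn
# into a collapsible tree.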
def write_licenses(image_obj_list):
'''Adds licenses to top of the page'''
licenses = get_licenses_only(image_obj_list)
html_string = ''
html_string = html_string + '<ul class ="myUL"> \n'
html_string = html_string + '<li><span class="caret">Summary of \
Licenses Found</span> \n'
html_string = html_string + '<ul class ="nested"> \n'
for lic in licenses:
html_string = html_string + \
'<li style="font-family: \'Inconsolata\' , monospace;" >' + \
lic + '</li>\n'
html_string = html_string + '</ul></li></ul> \n'
return html_string
def create_html_report(report_dict, image_obj_list):
'''Return the html report as a string'''
logger.debug("Creating HTML report...")
report = ''
report = report + '\n' + head % (css, get_tool_version(),
report_dict['images'][0]['image']['name']
+ ':' +
report_dict['images'][0]['image']['tag'])
report = report + '\n' + write_licenses(image_obj_list)
report = report + '\n' + report_dict_to_html(report_dict)
report = report + '\n' + js
report = report + '\n' + '</body>\n</html>\n'
return report
def get_report_dict(image_obj_list):
'''Given an image object list, return a python dict of the report'''
image_list = []
for image in image_obj_list:
image_list.append({'image': image.to_dict()})
image_dict = {'images': image_list}
return image_dict
def generate(self, image_obj_list):
'''Given a list of image objects, create a html report
for the images'''
report_dict = get_report_dict(image_obj_list)
report = create_html_report(report_dict, image_obj_list)
return report | UTF-8 | Python | false | false | 5,736 | py | 5 | html.py | 2 | 0.526674 | 0.523187 | 0 | 139 | 40.273381 | 112 |
kpan2034/Computer-Vision-Object-Tracking | 5,308,579,599,699 | f9b6eb124c02dbe428dcb9bf1a7d4ea7693afd7b | bedf8b841d4f7049d0841e883f9074878bffc472 | /particle_filter.py | 82e5f59a717cc90a91fbedecca0813d6948e231a | []
| no_license | https://github.com/kpan2034/Computer-Vision-Object-Tracking | 9493ed6f60559556bcc2b9ae883fa120d5921746 | 4c1c37ce2baa33896f3b22dc0d403ad2967b001f | refs/heads/master | 2022-11-07T04:06:33.037183 | 2020-06-25T11:04:31 | 2020-06-25T11:04:31 | 273,983,774 | 0 | 0 | null | true | 2020-06-21T21:07:10 | 2020-06-21T21:07:09 | 2020-06-21T21:03:24 | 2020-06-21T21:03:22 | 51,008 | 0 | 0 | 0 | null | false | false | import cv2
import numpy as np
from numpy.random import randn
from numpy.random import uniform
import scipy.stats
from filterpy.monte_carlo import systematic_resample
from filterpy.monte_carlo import residual_resample
from filterpy.monte_carlo import stratified_resample
from filterpy.monte_carlo import multinomial_resample
# To initialize particles before tracking starts
def create_uniform_particles(x_range, y_range, N):
particles = np.empty((N, 2))
particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
return particles
# To generate new particles during tracking
def create_gaussian_particles(mean, std, N):
particles = np.empty((N, 2))
particles[:, 0] = mean[0] + (randn(N) * std[0])
particles[:, 1] = mean[1] + (randn(N) * std[1])
return particles
# Move particles based on estimated velocity of the object
def predict(particles, vel=None, std=15):
N = len(particles)
if vel is None or vel != vel: # NaN check
vel = [0, 0]
particles[:, 0] += vel[0] + (randn(N) * std)
particles[:, 1] += vel[1] + (randn(N) * std)
# Find out how likely the particles are based on their proximity to the object
def update(particles, weights, method, posm):
pos = np.empty((len(particles), 2))
pos[:, 0].fill(posm[0])
pos[:, 1].fill(posm[1])
# Get dist of each particle from measured position
dist = np.linalg.norm(particles - pos, axis=1)
if method == 'linear':
# linear weighting:
max_dist = np.amax(dist)
dist = np.add(-dist, max_dist)
weights.fill(1.0)
weights *= dist
elif method == 'gaussian':
# Assign probabilities as Gaussian dist w/ mean=0 and std=X
weights *= scipy.stats.norm.pdf(dist, loc=0, scale=15)
else:
print("Error: No such method to update weights.")
exit(0)
# to avoid zero weights
weights += 1.e-300
# normalize
weights /= sum(weights)
def estimate(particles, weights):
mean = np.average(particles, weights=weights, axis=0)
var = np.average((particles - mean) ** 2, weights=weights, axis=0)
return mean, var
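
# A minimal single-step sketch of how the helpers above fit together; the frame
# size, particle count, velocity and measurement below are made-up values and
# _demo_filter_step is not part of the original tracker.
def _demo_filter_step(n=1000):
    particles = create_uniform_particles((0, 640), (0, 480), n)
    weights = np.ones(n) / n
    predict(particles, vel=[2.0, 0.0])  # drift the cloud by a guessed velocity
    update(particles, weights, 'gaussian', (320.0, 240.0))  # re-weight against a fake measurement
    return estimate(particles, weights)  # weighted mean and variance of the state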
# Resample function
def resample_from_index(particles, weights, indexes):
particles[:] = particles[indexes]
weights.resize(len(particles))
weights.fill(1.0 / len(weights))
# Find meaningful particles, if neff < th then resample; (th = N/2)
def neff(weights):
return 1.0 / np.sum(np.square(weights))
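# For uniform weights (w_i = 1/N) this evaluates to N and shrinks toward 1 as
# the weight mass concentrates on a few particles, which is what triggers the
# resampling step below.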
def getCentreFromWindow(win):
xc = win[0] + win[2] / 2
yc = win[1] + win[3] / 2
centre = (xc, yc)
return centre
def getTrackWindow(centre, win):
x = int(np.floor(centre[0] - win[2] / 2))
y = int(np.floor(centre[1] - win[3] / 2))
track_window = (x, y, win[2], win[3])
return track_window
def evalParticle(backproj, particlesT):
p = [int(x) for x in particlesT[0]]
q = [int(x) for x in particlesT[1]]
return backproj[q, p]
def resample(particles, weights, th, method):
if neff(weights) < th:
indexes = []
if method == 0:
indexes = systematic_resample(weights)
elif method == 1:
indexes = multinomial_resample(weights)
elif method == 2:
indexes = stratified_resample(weights)
elif method == 3:
indexes = residual_resample(weights)
resample_from_index(particles, weights, indexes)
assert np.allclose(weights, 1 / len(particles))
def estVelocity(x_pos, y_pos, t_count=15, poly=1):
if len(x_pos) > t_count:
x_pos = x_pos[-t_count:]
y_pos = y_pos[-t_count:]
elif len(x_pos) < t_count:
return None
t = list(range(1, len(x_pos) + 1))
x_model = np.polyfit(t, x_pos, 1)
y_model = np.polyfit(t, y_pos, 1)
x_vel = np.poly1d(x_model)
y_vel = np.poly1d(y_model)
return [(x_vel(t[-1]) - x_vel(t[0])) / (t[-1] - t[0]), (y_vel(t[-1]) - y_vel(t[0])) / (t[-1] - t[0])]
# To draw particles on the frame
def draw_particles(image, particles, color=(0, 0, 255)):
for p in particles:
image = cv2.circle(image, (int(p[0]), int(p[1])), 1, color, -1)
# To draw bounding box on the frame
def draw_box(im, estimated, measured):
if measured is None:
measured = [(0, 0), (0, 0)]
estimated = [int(x) for li in estimated for x in li]
measured = [int(x) for li in measured for x in li]
im = cv2.rectangle(im, (estimated[0], estimated[1]), (estimated[2], estimated[3]), (255, 0, 0), 2)
im = cv2.rectangle(im, (measured[0], measured[1]), (measured[2], measured[3]), (0, 255, 0), 2)
| UTF-8 | Python | false | false | 4,758 | py | 16 | particle_filter.py | 9 | 0.599622 | 0.573981 | 0 | 148 | 30.148649 | 105 |
EsmeraldaQuintana/MOSTregi | 6,932,077,263,117 | c39b791097ba6195a3ba84fb42c1d8c7bc9f3fe3 | eece1023344ee4a2f857892290080a96472367f2 | /MOSTregi/events/migrations/0081_auto_20180312_1011.py | c6a8d9a2dabb118ba7359694d76abb6205d2e356 | []
| no_license | https://github.com/EsmeraldaQuintana/MOSTregi | 6d883a9695625af92d963c9fd350c86eaf4a794d | e9d0a5f4da17465791d4021f562dd270838c4201 | refs/heads/master | 2022-12-10T05:56:07.289295 | 2018-04-13T12:21:16 | 2018-04-13T12:21:16 | 129,149,016 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.2 on 2018-03-12 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0080_auto_20180311_2212'),
]
operations = [
migrations.AlterField(
model_name='bookingrequest',
name='date_request',
field=models.DateField(verbose_name='date requested'),
),
]
| UTF-8 | Python | false | false | 415 | py | 71 | 0081_auto_20180312_1011.py | 49 | 0.604819 | 0.53012 | 0 | 18 | 22.055556 | 66 |
Mistchenko/fastapi_template | 3,332,894,665,554 | 77981077de8ac16e69c771a44c64c6bcba6f5b87 | b4ef13e1bdedba0ae342d30a017b2555861225c9 | /src/ccxx/fastapi/main.py | fc61700257ae7af036070e557edfc3994b7873d5 | []
| no_license | https://github.com/Mistchenko/fastapi_template | f76126e4d2f20cc0d708ade953a50723ec27c0f6 | c458b03e13600c48b9a6c044bbf8db6bfdbf190f | refs/heads/main | 2023-01-03T04:55:16.903630 | 2020-10-20T09:11:53 | 2020-10-20T09:11:53 | 305,618,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
ccxx.fastapi.main: entry module of the service.
"""
import logging
from fastapi import FastAPI
from logger.middleware import LoggerMiddleware
app = FastAPI()
app.add_middleware(LoggerMiddleware)
logger = logging.getLogger(__name__)
@app.get("/")
async def hello():
"""
    Return a greeting message.
"""
logger.info('Demo logger info')
logger.debug('Demo logger DEBUG')
message = f"Hello"
return {"message": message}
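
# A typical local run, assuming the package layout given in the module docstring:
#   uvicorn ccxx.fastapi.main:app --reload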
| UTF-8 | Python | false | false | 509 | py | 8 | main.py | 5 | 0.698238 | 0.698238 | 0 | 21 | 20.619048 | 46 |
SaschaFroelich/Boltzmann-Machine | 2,534,030,740,898 | 6702b1fdbd5881a75c66ddec48fcc28626b0cea7 | 36793a44316de9ce8f4ea1d46de4081dd9ddd276 | /boltzmann_distribution.py | 5c76e9bcb88c9b7d01dc827cebd5d6ebc1a8d515 | []
| no_license | https://github.com/SaschaFroelich/Boltzmann-Machine | a69d3f5199c1bc0b47cf4971bd2b0bf7e418e280 | 19974ace70e692bac093d62fd8062d55ddc9b491 | refs/heads/master | 2021-07-12T09:24:47.072670 | 2021-04-12T08:50:39 | 2021-04-12T08:50:39 | 243,605,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 20:09:17 2020
@author: sascha
"""
# Distance from actual Boltzmann Distro
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import operator as op
from functools import reduce
import BM
#%%
no_iterations = np.linspace(10,10e2,100)
average_distance = []
for iters in no_iterations:
print("The no of iters is %d"%int(iters))
bm = BM.BoltzmannM(10, init_state=np.ones(10))
boltzmann_occ, rel_occ_in_uniques, E = bm.iterate(int(iters))
dist = abs(boltzmann_occ-rel_occ_in_uniques).sum()/len(rel_occ_in_uniques)
average_distance.append(dist)
#%%
bm = BM.BoltzmannM(10, init_state=np.ones(10))
boltzmann_occ, rel_occ_in_uniques, E = bm.iterate(100)
#%%
plt.figure()
plt.plot(no_iterations,average_distance)
plt.ylabel('Distance [a.u.]')
plt.title('Average distance between Boltzmann distribution and actual state vector distribution at the end of the iterations')
plt.xlabel('No of iterations')
plt.savefig('/home/sascha/Desktop/PrivateProjects/Boltzmann Machine/average_distance.svg')
plt.show() | UTF-8 | Python | false | false | 1,139 | py | 10 | boltzmann_distribution.py | 7 | 0.721686 | 0.692713 | 0 | 46 | 23.782609 | 126 |
danieliong/SWMF_QU | 17,961,553,254,710 | addb77856d5f76d4d3c2918b8237e07018899f36 | be310d06b94715b881d9befd33e32014effc4d9a | /JuliaFiles/2021_02_26_RunExploration/Scripts/formatMetrics.py | fc7260bfcc3e30427ded3c1f5cd6a2736919edaf | []
| no_license | https://github.com/danieliong/SWMF_QU | 063c7d4050843680b60a722310666e168dae3f56 | 6468896c11c8b2cb9b4ee5900d8d4cd62bf9df4d | refs/heads/master | 2023-05-19T03:26:54.933003 | 2022-11-22T03:34:53 | 2022-11-22T03:34:53 | 342,301,340 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pandas as pd
import numpy as np
relvar = pd.read_csv("output/relvar_96runs.csv")
relvar["model"] = np.concatenate([["AWSoM"]*8, ["AWSoMR"]*8])
relvar.set_index(["model","mapCR"], inplace=True)
relvar_tbl = relvar.to_latex()
with open("output/relvar_tbl.tex", "w") as f:
f.write(relvar_tbl)
rmse = pd.read_csv("output/rmse_96runs.csv")
rmse["model"] = np.concatenate([["AWSoM"]*8, ["AWSoMR"]*8])
rmse.set_index(["model","mapCR"], inplace=True)
rmse_tbl = rmse.to_latex()
with open("output/rmse_tbl.tex", "w") as f:
f.write(rmse_tbl)
| UTF-8 | Python | false | false | 574 | py | 37 | formatMetrics.py | 3 | 0.662021 | 0.648084 | 0 | 20 | 27.7 | 61 |
payor-ma/spvs-calc | 1,992,864,841,423 | acfaa129fb7615d0d442fba2ac36bc30679d6c61 | b0c2d91de6065075be5e5fe3761b6014598311f2 | /_sanitsation.py | 7fbc6abb8a10176499e6858d3b7c554fa7b9169e | [
"MIT"
]
| permissive | https://github.com/payor-ma/spvs-calc | efc85fadab21cc91da97352c64922bcf20a3e5e2 | bee0ea14f45e0dad1e893b3ceced1ba734784956 | refs/heads/main | 2023-04-17T20:34:58.159452 | 2021-05-06T13:58:49 | 2021-05-06T13:58:49 | 364,893,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys
from _constants import *
def checkSexDigit(sexStrDigit):
try:
if sexStrDigit == "" or sexStrDigit == " ":
sexStrDigit = "0"
sexDigit = int(sexStrDigit)
if sexDigit > 59:
raise ValueError("Digit above 59.")
if sexDigit < 0:
raise ValueError("Digit negative.")
return sexDigit
except ValueError:
        sys.exit("Invalid sexagesimal number inputted") | UTF-8 | Python | false | false | 470 | py | 7 | _sanitsation.py | 6 | 0.597872 | 0.582979 | 0 | 18 | 25.166667 | 55 |
all-of-us/raw-data-repository | 15,204,184,263,476 | f0e2c8c81114666d49dd469fa010644c57018eba | af43615e07f2bfaa908d6d96b4c90f98ce3ad47b | /rdr_service/resource/schemas/genomics.py | 346a01b2f8a63c6adf058d1fd1fb4f72ca60d6d8 | [
"BSD-3-Clause"
]
| permissive | https://github.com/all-of-us/raw-data-repository | 11aa25385543f5f8ef706663b79ce181775c1c9a | 461ae46aeda21d54de8a91aa5ef677676d5db541 | refs/heads/devel | 2023-09-01T06:47:25.710651 | 2023-09-01T01:18:56 | 2023-09-01T01:18:56 | 66,000,771 | 46 | 22 | BSD-3-Clause | false | 2023-09-14T21:06:38 | 2016-08-18T13:47:08 | 2023-04-02T23:54:37 | 2023-09-14T21:06:36 | 28,609 | 39 | 16 | 2 | Python | false | false | #
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
from marshmallow import validate
from rdr_service.resource import Schema, fields
from rdr_service.resource.constants import SchemaID
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicJob, GenomicWorkflowState, \
GenomicSubProcessStatus, GenomicSubProcessResult, GenomicManifestTypes, GenomicQcStatus
class GenomicSetSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
genomic_set_name = fields.String(validate=validate.Length(max=80))
genomic_set_criteria = fields.String(validate=validate.Length(max=80))
genomic_set_version = fields.Int32()
genomic_set_file = fields.String(validate=validate.Length(max=250))
genomic_set_file_time = fields.DateTime()
genomic_set_status = fields.EnumString(enum=GenomicSetStatus)
genomic_set_status_id = fields.EnumInteger(enum=GenomicSetStatus)
validated_time = fields.DateTime()
class Meta:
schema_id = SchemaID.genomic_set
resource_uri = 'GenomicSet'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
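
# Illustrative sketch (assumes the resource Schema keeps marshmallow's standard
# validate() behavior; the field values below are made up):
#   errors = GenomicSetSchema().validate({'id': 1, 'genomic_set_version': 2})
# an empty error dict means the row passed the declared field validations.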
class GenomicSetMemberSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
genomic_set_id = fields.Int32()
participant_id = fields.String(validate=validate.Length(max=10))
ny_flag = fields.Int32()
sex_at_birth = fields.String(validate=validate.Length(max=20))
genome_type = fields.String(validate=validate.Length(max=80))
biobank_id = fields.Int32()
biobank_id_str = fields.String(validate=validate.Length(max=128))
package_id = fields.String(validate=validate.Length(max=80))
validation_status = fields.EnumString(enum=GenomicSetMemberStatus)
validation_status_id = fields.EnumInteger(enum=GenomicSetMemberStatus)
# validation_flags is an array of GenomicValidationFlag Enum values.
validation_flags = fields.String(validate=validate.Length(max=80))
validated_time = fields.DateTime()
sample_id = fields.String(validate=validate.Length(max=80))
sample_type = fields.String(validate=validate.Length(max=50))
reconcile_cvl_job_run_id = fields.Int32()
sequencing_file_name = fields.String(validate=validate.Length(max=128))
reconcile_gc_manifest_job_run_id = fields.Int32()
reconcile_metrics_bb_manifest_job_run_id = fields.Int32()
reconcile_metrics_sequencing_job_run_id = fields.Int32()
ai_an = fields.String(validate=validate.Length(max=2))
gc_manifest_box_plate_id = fields.String(validate=validate.Length(max=50))
gc_manifest_box_storage_unit_id = fields.String(validate=validate.Length(max=50))
gc_manifest_contact = fields.String(validate=validate.Length(max=50))
gc_manifest_email = fields.String(validate=validate.Length(max=50))
gc_manifest_failure_description = fields.String(validate=validate.Length(max=128))
gc_manifest_failure_mode = fields.String(validate=validate.Length(max=128))
gc_manifest_matrix_id = fields.String(validate=validate.Length(max=20))
gc_manifest_parent_sample_id = fields.String(validate=validate.Length(max=20))
gc_manifest_quantity_ul = fields.Int32()
gc_manifest_sample_source = fields.String(validate=validate.Length(max=20))
gc_manifest_study = fields.String(validate=validate.Length(max=50))
gc_manifest_study_pi = fields.String(validate=validate.Length(max=50))
gc_manifest_test_name = fields.String(validate=validate.Length(max=50))
gc_manifest_total_concentration_ng_per_ul = fields.Int32()
gc_manifest_total_dna_ng = fields.Int32()
gc_manifest_tracking_number = fields.String(validate=validate.Length(max=50))
gc_manifest_treatments = fields.String(validate=validate.Length(max=20))
gc_manifest_visit_description = fields.String(validate=validate.Length(max=128))
gc_manifest_well_position = fields.String(validate=validate.Length(max=10))
gem_a1_manifest_job_run_id = fields.Int32()
gem_a2_manifest_job_run_id = fields.Int32()
gem_pass = fields.String(validate=validate.Length(max=10))
gem_a3_manifest_job_run_id = fields.Int32()
aw3_manifest_job_run_id = fields.Int32()
aw4_manifest_job_run_id = fields.Int32()
cvl_aw1c_manifest_job_run_id = fields.Int32()
cvl_aw1cf_manifest_job_run_id = fields.Int32()
cvl_w1_manifest_job_run_id = fields.Int32()
cvl_w2_manifest_job_run_id = fields.Int32()
cvl_w3_manifest_job_run_id = fields.Int32()
cvl_w4_manifest_job_run_id = fields.Int32()
cvl_w4f_manifest_job_run_id = fields.Int32()
genomic_workflow_state = fields.EnumString(enum=GenomicWorkflowState)
genomic_workflow_state_id = fields.EnumInteger(enum=GenomicWorkflowState)
collection_tube_id = fields.String(validate=validate.Length(max=80))
gc_site_id = fields.String(validate=validate.Length(max=11))
genomic_workflow_state_modified_time = fields.DateTime()
report_consent_removal_date = fields.DateTime()
qc_status = fields.EnumString(enum=GenomicQcStatus)
qc_status_id = fields.EnumInteger(enum=GenomicQcStatus)
fingerprint_path = fields.String(validate=validate.Length(max=255))
dev_note = fields.String(validate=validate.Length(max=255))
aw1_file_processed_id = fields.Int32()
aw2_file_processed_id = fields.Int32()
aw2f_job_run_id = fields.Int32()
aw0_manifest_file_id = fields.Int32()
aw2f_manifest_job_run_id = fields.Int32()
aw3_manifest_file_id = fields.Int32()
block_research = fields.Int16()
block_research_reason = fields.String(validate=validate.Length(max=255))
block_results = fields.Int16()
block_results_reason = fields.String(validate=validate.Length(max=255))
color_metrics_job_run_id = fields.Int32()
cvl_secondary_conf_failure = fields.String(validate=validate.Length(max=255))
cvl_w1il_hdr_job_run_id = fields.Int32()
cvl_w1il_pgx_job_run_id = fields.Int32()
cvl_w2sc_manifest_job_run_id = fields.Int32()
cvl_w2w_job_run_id = fields.Int32()
cvl_w3ns_manifest_job_run_id = fields.Int32()
cvl_w3sc_manifest_job_run_id = fields.Int32()
cvl_w3sr_manifest_job_run_id = fields.Int32()
cvl_w3ss_manifest_job_run_id = fields.Int32()
cvl_w4wr_hdr_manifest_job_run_id = fields.Int32()
cvl_w4wr_pgx_manifest_job_run_id = fields.Int32()
cvl_w5nf_hdr_manifest_job_run_id = fields.Int32()
cvl_w5nf_pgx_manifest_job_run_id = fields.Int32()
diversion_pouch_site_flag = fields.Int16()
gem_date_of_import = fields.DateTime()
gem_metrics_ancestry_loop_response = fields.String(validate=validate.Length(max=10))
gem_metrics_available_results = fields.String(validate=validate.Length(max=255))
gem_metrics_results_released_at = fields.Int32()
ignore_flag = fields.Int16()
informing_loop_ready_flag = fields.Int32()
informing_loop_ready_flag_modified = fields.DateTime()
participant_origin = fields.String(validate=validate.Length(max=80))
replated_member_id = fields.Int32()
class Meta:
schema_id = SchemaID.genomic_set_member
resource_uri = 'GenomicSetMember'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicJobRunSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
job = fields.EnumString(enum=GenomicJob)
job_id = fields.EnumInteger(enum=GenomicJob)
start_time = fields.DateTime()
end_time = fields.DateTime()
run_status = fields.EnumString(enum=GenomicSubProcessStatus)
run_status_id = fields.EnumInteger(enum=GenomicSubProcessStatus)
run_result = fields.EnumString(enum=GenomicSubProcessResult)
run_result_id = fields.EnumInteger(enum=GenomicSubProcessResult)
result_message = fields.String(validate=validate.Length(max=150))
class Meta:
schema_id = SchemaID.genomic_job_run
resource_uri = 'GenomicJobRun'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicFileProcessedSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
run_id = fields.Int32()
start_time = fields.DateTime()
end_time = fields.DateTime()
file_path = fields.String(validate=validate.Length(max=255))
bucket_name = fields.String(validate=validate.Length(max=128))
file_name = fields.String(validate=validate.Length(max=128))
file_status = fields.EnumString(enum=GenomicSubProcessStatus)
file_status_id = fields.EnumInteger(enum=GenomicSubProcessStatus)
file_result = fields.EnumString(enum=GenomicSubProcessResult)
file_result_id = fields.EnumInteger(enum=GenomicSubProcessResult)
upload_date = fields.DateTime()
genomic_manifest_file_id = fields.Int32()
class Meta:
schema_id = SchemaID.genomic_file_processed
resource_uri = 'GenomicFileProcessed'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicManifestFileSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
upload_date = fields.DateTime()
manifest_type = fields.EnumString(enum=GenomicManifestTypes)
manifest_type_id = fields.EnumInteger(enum=GenomicManifestTypes)
file_path = fields.String(validate=validate.Length(max=255))
bucket_name = fields.String(validate=validate.Length(max=128))
record_count = fields.Int32()
rdr_processing_complete = fields.Int16()
rdr_processing_complete_date = fields.DateTime()
ignore_flag = fields.Int16()
file_name = fields.String(validate=validate.Length(max=255))
class Meta:
schema_id = SchemaID.genomic_manifest_file
resource_uri = 'GenomicManifestFile'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicManifestFeedbackSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
input_manifest_file_id = fields.Int32()
feedback_manifest_file_id = fields.Int32()
feedback_record_count = fields.Int32()
feedback_complete = fields.Int16()
feedback_complete_date = fields.DateTime()
ignore_flag = fields.Int16()
version = fields.Int32()
class Meta:
schema_id = SchemaID.genomic_manifest_feedback
resource_uri = 'GenomicManifestFeedback'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicGCValidationMetricsSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
genomic_set_member_id = fields.Int32()
genomic_file_processed_id = fields.Int32()
lims_id = fields.String(validate=validate.Length(max=80))
call_rate = fields.String(validate=validate.Length(max=10))
mean_coverage = fields.String(validate=validate.Length(max=10))
genome_coverage = fields.String(validate=validate.Length(max=10))
contamination = fields.String(validate=validate.Length(max=10))
sex_concordance = fields.String(validate=validate.Length(max=10))
processing_status = fields.String(validate=validate.Length(max=15))
notes = fields.String(validate=validate.Length(max=128))
site_id = fields.String(validate=validate.Length(max=80))
chipwellbarcode = fields.String(validate=validate.Length(max=80))
sex_ploidy = fields.String(validate=validate.Length(max=10))
crai_path = fields.String(validate=validate.Length(max=255))
cram_md5_path = fields.String(validate=validate.Length(max=255))
cram_path = fields.String(validate=validate.Length(max=255))
hf_vcf_md5_path = fields.String(validate=validate.Length(max=255))
hf_vcf_path = fields.String(validate=validate.Length(max=255))
hf_vcf_tbi_path = fields.String(validate=validate.Length(max=255))
idat_green_md5_path = fields.String(validate=validate.Length(max=255))
idat_green_path = fields.String(validate=validate.Length(max=255))
idat_red_md5_path = fields.String(validate=validate.Length(max=255))
idat_red_path = fields.String(validate=validate.Length(max=255))
raw_vcf_md5_path = fields.String(validate=validate.Length(max=255))
raw_vcf_path = fields.String(validate=validate.Length(max=255))
raw_vcf_tbi_path = fields.String(validate=validate.Length(max=255))
vcf_md5_path = fields.String(validate=validate.Length(max=255))
vcf_path = fields.String(validate=validate.Length(max=255))
aligned_q30_bases = fields.Int64()
array_concordance = fields.String(validate=validate.Length(max=10))
aou_hdr_coverage = fields.String(validate=validate.Length(max=10))
vcf_tbi_path = fields.String(validate=validate.Length(max=255))
dev_note = fields.String(validate=validate.Length(max=255))
ignore_flag = fields.Int16()
contamination_category = fields.String(validate=validate.Length(max=64))
contamination_category_id = fields.Int16()
crai_deleted = fields.Int16()
cram_deleted = fields.Int16()
cram_md5_deleted = fields.Int16()
hf_vcf_deleted = fields.Int16()
hf_vcf_md5_deleted = fields.Int16()
hf_vcf_tbi_deleted = fields.Int16()
idat_green_deleted = fields.Int16()
idat_green_md5_deleted = fields.Int16()
idat_red_deleted = fields.Int16()
idat_red_md5_deleted = fields.Int16()
raw_vcf_deleted = fields.Int16()
raw_vcf_md5_deleted = fields.Int16()
raw_vcf_tbi_deleted = fields.Int16()
vcf_deleted = fields.Int16()
vcf_md5_deleted = fields.Int16()
vcf_tbi_deleted = fields.Int16()
drc_call_rate = fields.String(validate=validate.Length(max=255))
drc_contamination = fields.String(validate=validate.Length(max=255))
drc_fp_concordance = fields.String(validate=validate.Length(max=255))
drc_mean_coverage = fields.String(validate=validate.Length(max=255))
drc_sex_concordance = fields.String(validate=validate.Length(max=255))
gvcf_deleted = fields.Int16()
gvcf_md5_deleted = fields.Int16()
gvcf_md5_path = fields.String(validate=validate.Length(max=512))
gvcf_path = fields.String(validate=validate.Length(max=512))
mapped_reads_pct = fields.String(validate=validate.Length(max=10))
pipeline_id = fields.String(validate=validate.Length(max=255))
# DA-3072, PDR-1435 - WGS Reprocessing and Pipeline Upgrade columns
processing_count = fields.Int16()
aw3_ready_flag = fields.Int16()
aw3_manifest_job_run_id = fields.Int32()
aw3_manifest_file_id = fields.Int32()
aw4_manifest_job_run_id = fields.Int32()
class Meta:
schema_id = SchemaID.genomic_gc_validation_metrics
resource_uri = 'GenomicGCValidationMetrics'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicUserEventMetricsSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
participant_id = fields.String(validate=validate.Length(max=10))
created_at = fields.DateTime()
event_name = fields.String(validate=validate.Length(max=512))
device = fields.String(validate=validate.Length(max=255))
operating_system = fields.String(validate=validate.Length(max=255))
browser = fields.String(validate=validate.Length(max=255))
file_path = fields.String(validate=validate.Length(max=512))
run_id = fields.Int32()
ignore_flag = fields.Int16()
reconcile_job_run_id = fields.Int32()
class Meta:
schema_id = SchemaID.genomic_user_event_metrics
resource_uri = 'GenomicUserEventMetrics'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicInformingLoopSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
message_record_id = fields.Int32()
participant_id = fields.String(validate=validate.Length(max=10))
event_type = fields.String(validate=validate.Length(max=256))
event_authored_time = fields.DateTime()
module_type = fields.String(validate=validate.Length(max=128))
decision_value = fields.String(validate=validate.Length(max=128))
sample_id = fields.String(validate=validate.Length(max=80))
class Meta:
schema_id = SchemaID.genomic_informing_loop
resource_uri = 'GenomicInformingLoop'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function).
class GenomicCVLResultPastDueSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
genomic_set_member_id = fields.Int32()
sample_id = fields.String(validate=validate.Length(max=255))
results_type = fields.String(validate=validate.Length(max=128))
cvl_site_id = fields.String(validate=validate.Length(max=128))
email_notification_sent = fields.Int16()
email_notification_sent_date = fields.DateTime()
resolved = fields.Int16()
resolved_date = fields.DateTime()
class Meta:
schema_id = SchemaID.genomic_cvl_result_past_due
resource_uri = 'GenomicCVLResultPastDue'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function)
class GenomicMemberReportStateSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
genomic_set_member_id = fields.Int32()
genomic_report_state = fields.Int16()
module = fields.String(validate=validate.Length(max=128))
participant_id = fields.String(validate=validate.Length(max=10))
genomic_report_state_str = fields.String(validate=validate.Length(max=128))
event_authored_time = fields.DateTime()
event_type = fields.String(validate=validate.Length(max=128))
message_record_id = fields.Int16()
sample_id = fields.String(validate=validate.Length(max=128))
report_revision_number = fields.Int16()
class Meta:
schema_id = SchemaID.genomic_member_report_state
resource_uri = 'GenomicMemberReportState'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function)
class GenomicResultViewedSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
message_record_id = fields.Int32()
participant_id = fields.String(validate=validate.Length(max=10))
event_type = fields.String(validate=validate.Length(max=256))
event_authored_time = fields.DateTime()
module_type = fields.String(validate=validate.Length(max=128))
first_viewed = fields.DateTime()
last_viewed = fields.DateTime()
sample_id = fields.String(validate=validate.Length(max=80))
class Meta:
schema_id = SchemaID.genomic_result_viewed
resource_uri = 'GenomicResultViewed'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function)
class GenomicAppointmentEventSchema(Schema):
id = fields.Int32()
created = fields.DateTime()
modified = fields.DateTime()
message_record_id = fields.Int32()
participant_id = fields.String(validate=validate.Length(max=10))
event_type = fields.String(validate=validate.Length(max=256))
event_authored_time = fields.DateTime()
module_type = fields.String(validate=validate.Length(max=255))
appointment_id = fields.Int32()
source = fields.String(validate=validate.Length(max=255))
location = fields.String(validate=validate.Length(max=255))
contact_number = fields.String(validate=validate.Length(max=255))
language = fields.String(validate=validate.Length(max=255))
cancellation_reason = fields.String(validate=validate.Length(max=255))
appointment_timezone = fields.String(validate=validate.Length(max=255))
appointment_timestamp = fields.DateTime()
class Meta:
schema_id = SchemaID.genomic_appointment_event
resource_uri = 'GenomicAppointmentEvent'
resource_pk_field = 'id'
pii_fields = () # List fields that contain PII data.
pii_filter = {} # dict(field: lambda function) | UTF-8 | Python | false | false | 20,904 | py | 1,666 | genomics.py | 1,436 | 0.706994 | 0.67853 | 0 | 460 | 44.445652 | 115 |
celiomarcio/semanticethon | 15,290,083,585,165 | ddfbe7a1c39f85bba98b0e61f1a0913a292b8acb | 2f320584dc524bea1c0c7378f84c2f83cc044f43 | /app/mimetype.py | a6ae4a8c2e1144c926136eba961d68c22b154524 | [
"Apache-2.0"
]
| permissive | https://github.com/celiomarcio/semanticethon | a9847b24c67ae3deaca82724bee4e4d988c21d40 | 2bade7ff7288ed8ccf838662bdeb0342e32c8150 | refs/heads/main | 2023-07-06T19:55:56.543139 | 2021-08-11T14:23:33 | 2021-08-11T14:23:33 | 394,400,818 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | JSON = 'application/json'
mimetype_map = {
'xml': 'application/rdf+xml',
'json-ld': JSON,
'n3': 'text/n3',
'turtle': 'application/x-turtle',
# None: 'text/xml'
}
def correct_format(output_format):
if output_format is None:
corrected_output_format = 'xml'
else:
corrected_output_format = output_format
return corrected_output_format | UTF-8 | Python | false | false | 381 | py | 13 | mimetype.py | 6 | 0.627297 | 0.622047 | 0 | 17 | 21.470588 | 47 |
ysk0951/PythonTestcrawling | 5,686,536,713,019 | d73f6c757808e779269cce46e458441ef69efd77 | 614881f4dede6fb350ab58c299ce0b94fa51f319 | /python0909/Test03.py | 81fcb6f2f5c0435cb901ff898280ca4fb281a114 | []
| no_license | https://github.com/ysk0951/PythonTestcrawling | 3398008eec78919c3bd1b1075c5cae97790c96d5 | ae651d0447a522b672e22e090e5fafae1cde4c48 | refs/heads/master | 2022-12-29T14:47:01.831632 | 2020-10-03T09:13:53 | 2020-10-03T09:13:53 | 293,973,512 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
# Input function: input()
A function that receives input from the user through the console.
Everything is received as a string.
# Type conversion
string >> int/float
int(value) / float(value)
int/float >> string
str(value)
'''
#age = input("Enter your age.")
#age = int(age)
#print(age+10)
# Strings: str
# "string" 'string' """multi-line string""" '''multi-line string'''
print("""hello
python
hahahah""")
# String operations: concatenation with +
a = "python"
b = "plus"
c = a+b
print(c)
print(a+b)
print(a,b)
# 2. String repetition: *
print(a*10)
d = a*10
print(d)
#
#person1 = int(input("Input 1"))
#person2 = int(input("Input 2"))
#person3 = int(input("Input 3"))
#result = (person1+person2+person3)/3
#print("The result is %.1f" % result)
# String indexing
str = "hello"
print(str[0])
a = "Global"
print(a[0])
str = "hello"
print(str[4]+str[3]+str[2]+str[1]+str[0])
print(str[-1]+str[-2]+str[-3]+str[-4]+str[0])
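# Added note (not part of the original exercise): the same reversal can be
# written with an extended slice.
print(str[::-1])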
| UTF-8 | Python | false | false | 944 | py | 60 | Test03.py | 58 | 0.560109 | 0.519126 | 0 | 51 | 13.352941 | 45 |
ZaycheS/cpplitedocs | 8,392,366,120,387 | d9a1198ab1f9e6ae89b5e2ef7b16c1b2b0023c26 | e0215c0434151e4f7ab72f045a283f8b84badf83 | /method_pars.py | 272e513fb4a04a7f2a289cc4ff4886e44090fda3 | []
| no_license | https://github.com/ZaycheS/cpplitedocs | b0da406d681c810f411607695d01e070c1b55362 | 798f843aac2c3e90d9d4fcf37638045e565eaefd | refs/heads/master | 2020-08-03T21:06:37.105188 | 2019-11-25T20:55:40 | 2019-11-25T20:55:40 | 211,885,912 | 0 | 0 | null | false | 2019-11-30T16:22:52 | 2019-09-30T14:58:37 | 2019-11-25T20:56:02 | 2019-11-30T16:21:46 | 22 | 0 | 0 | 1 | Python | false | false | from util import *
from typename import TypeName
def method_str_handler(string, method_desc):
    """Parse a single C++ method declaration string into method_desc: leading
    specifiers, return type, method name, parameters and trailing specifiers."""
init_str = string.strip().split()
for i in range(len(init_str)):
if init_str[i] in keywords_list:
method_desc.add_keyword(init_str[i])
else:
break
else:
return
method_desc.type = init_str[i]
method_desc.name = ''
for k in range(i + 1, len(init_str)):
if init_str[k].find('(') != -1:
method_desc.name += ' ' + init_str[k].split('(', 1)[0]
break
else:
method_desc.name += ' ' + init_str[k]
method_desc.name=method_desc.name.strip()
ending = string.split('(', 1)[1]
result = string_parentheses_skip(ending, '(')
ending = ending[result[1]:]
params = result[0]
after_keywords = ending.replace('{', '').replace(';', '').split()
for ends in after_keywords:
if ends in keywords_list:
method_desc.add_keyword(ends)
for param in params:
parameter = TypeName()
param_comps = param.split()
for j in range(len(param_comps)):
if param_comps[j] in keywords_list:
parameter.add_keyword(param_comps[j])
else:
break
else:
continue
if param_comps[j] == 'void':
continue
parameter.set_type(param_comps[j].strip())
if j + 1 < len(param_comps):
parameter.set_name(param_comps[j + 1].strip())
method_desc.add_parameter(parameter)
def method_parser(strings, method_desc):
    """Collect the lines of a declaration up to the first ';' or '{', parse them,
    and return the index of the first line after the declaration (or after its
    body, when the method is defined inline)."""
# i = parentheses_skip(strings, 0, '(')
i = 0
stop = False
while not stop and i < len(strings):
for j in strings[i]:
if j == ';' or j == '{':
stop = True
i += 1
method = ''
for j in range(0, i):
method += strings[j].strip()
method_str_handler(method, method_desc)
if method.strip().endswith(';'):
return i
k = parentheses_skip(strings, 0)
return k
| UTF-8 | Python | false | false | 2,031 | py | 14 | method_pars.py | 14 | 0.536681 | 0.529296 | 0 | 66 | 29.772727 | 69 |
justperson94/Programming_exercise | 11,656,541,274,357 | 1cdbd8611d5d5a383ebd4ccdf5abae3fe32b8a0a | 1386891e2ba711acaa3a75464110840520397511 | /separate_two_file_types.py | e024a788266e03331b22902be2a0267dc6cb4120 | []
| no_license | https://github.com/justperson94/Programming_exercise | 87f9fe581e6b9220808bc7c2bc7361339c02e8ad | f31a6f3858488a2e8aa2f665de18b70819f6e35b | refs/heads/master | 2023-06-29T13:36:38.935414 | 2019-11-15T12:28:03 | 2019-11-15T12:28:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from shutil import copy
dataset_path = './images/'
output_root = './images_output/'
img_output_path = './images_output/images/'
label_output_path = './images_output/annotations/'
make_list = []
file_list = []
if not os.path.exists(img_output_path):
os.makedirs(img_output_path)
if not os.path.exists(label_output_path):
os.makedirs(label_output_path)
for root, _, files in os.walk(dataset_path, topdown=False):
    maker = root[9:]  # strip the leading './images/' prefix, keeping only the maker sub-folder name
print(root, files)
for file in files:
        print('maker : ', maker)
        print('file : ', file[-3:])
if file[-3:] == 'xml':
if not os.path.exists(label_output_path + maker):
os.makedirs(label_output_path + maker)
copy(dataset_path + maker + '/' + file, label_output_path + maker)
elif file[-3:] == 'jpg':
if not os.path.exists(img_output_path + maker):
os.makedirs(img_output_path + maker)
copy(dataset_path + maker + '/' + file, img_output_path + maker)
else:
pass
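# Added for illustration (not used by the loop above): os.path.splitext is a more
# robust way to read a file's extension than slicing its last three characters,
# since it also handles names such as 'photo.jpeg' or upper-case extensions.
def get_extension(filename):
    return os.path.splitext(filename)[1].lower().lstrip('.')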
| UTF-8 | Python | false | false | 1,061 | py | 22 | separate_two_file_types.py | 19 | 0.584354 | 0.578699 | 0 | 36 | 28.472222 | 78 |
nixonbali/whid | 15,195,594,333,058 | e4fc055c5cd64c55d6dc93a45dab5ee0729f7c94 | 94612c5215723240e689398aa3518afb532e8ca3 | /whid | 89af8d6232f19fa089e6bfe429d6ba063e43ce75 | []
| no_license | https://github.com/nixonbali/whid | 03640ff282d335fb4546bbe32435f921495ad72a | 25fd01003ecbadcff50d5ce8603891b59e14c6b6 | refs/heads/master | 2022-12-22T21:34:46.251143 | 2020-10-08T16:14:46 | 2020-10-08T16:14:46 | 256,850,226 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/local/bin/python3
import sys
from db.models import Things, Events, Notes
from datetime import datetime, timedelta
"""
v0 run w/: $ python3 -m cli.main [args]
"""
"""
args:
event [thing_name] [starttime] [endtime=None] [place=None] [duration=None]
-> triggers newEvent
-> asks for Note
-> if yes
- -> opens note.txt in shell text editor
- -> upon save/exit -> triggers Event.newNote(session, content=read(note.txt))
note [thing_names]
-> opens note.txt in shell text editor
-> upon save/exit -> triggers Note.newNote(session, thing_names=[thing_names], content=read(note.txt)
"""
"""
Development Mode: Use Test DB
"""
development = True
# if development:
# from db.db import create_session
# engine, Session = create_session("postgresql://localhost/test-whid.v0")
# else:
from db.db import Session
session = Session()
def invalid_input(*args):
"""Handles Invalid Input Argument"""
print(f"Invalid Action: {sys.argv[1]}")
print("Action must be in [event, note]")
def new_event(*args):
"""New Event Input"""
#Events.newEvent(cls, session, thing_name, starttime=None, endtime=None, place=None)
#Events.newNote(self, session, content)
# Most Basic Input: Just Thing Name
# Next Level: Duration and/or Place
# Next Level: Update Default Duration and/or Place
print('creating new event\n')
event = Events.newEvent(session, thing_name=args[0]) #ugly, use kwargs?
print(event.thing.name, event.id)
print('\n')
def new_note(*args):
"""New Note Input"""
#Notes.newNote(cls, session, thing_names=[], thing_ids=[], content=None)
print('creating new note')
def list_things(*args):
"""Lists Things"""
    dayrange = int(args[0]) if args else 14  # optional first CLI argument overrides the default 14-day window
print("Things You're Doing:\n")
today = datetime.today().date()
daterange = [today - timedelta(days=i) for i in range(dayrange, -1, -1)]
for thing in session.query(Things).all():
line = thing.name + " "*(20 - len(thing.name))
event_dates = set(event.starttime.date() for event in session.query(Events.starttime).filter(Events.thing_id == thing.id).all() if event.starttime != None)
line += " ".join(["X" if day in event_dates else "_" for day in daterange])
# " " if
print(line)
# print("\n".join(thing.name for thing in session.query(Things).all()))
"""
Other Functions
List Events/Notes By Thing
"""
from collections import defaultdict
switcher = defaultdict(lambda: invalid_input)
switcher['event'] = new_event
switcher['note'] = new_note
switcher['things'] = list_things
if __name__ == "__main__":
print('whid called\n')
try:
switcher[sys.argv[1]](*sys.argv[2:])
except IndexError:
print('No Actions Taken')
| UTF-8 | Python | false | false | 2,752 | 7 | whid | 4 | 0.66061 | 0.655523 | 0 | 96 | 27.666667 | 163 |
|
dkalashnik/stacklight-pytest | 8,048,768,728,448 | 49e8fcff1920dadb61557ef54c71a16de7fc60a2 | 4a1efb78203489dcdaecc5ba3fffe8887828bc17 | /stacklight_tests/tests/test_functional.py | 09424fbaab8b6a09ac038b28041373384d225437 | []
| no_license | https://github.com/dkalashnik/stacklight-pytest | bb2bef9f45961985a96657ddccf815086d5b437d | b76fcc55927ec9a5c03bfe8c643ddbb8b1e6c90d | refs/heads/master | 2021-01-13T12:37:06.297963 | 2018-03-28T22:01:11 | 2018-03-29T04:03:45 | 72,528,472 | 1 | 7 | null | false | 2018-04-03T13:06:27 | 2016-11-01T11:03:20 | 2018-03-29T04:03:48 | 2018-03-29T04:03:46 | 364 | 1 | 8 | 9 | Python | false | null | import logging
import time
import pytest
import yaml
from stacklight_tests.clients import influxdb_grafana_api_legacy_v2
from stacklight_tests.tests import base_test
from stacklight_tests import utils
logger = logging.getLogger(__name__)
def wait_for_resource_status(resource_client, resource,
expected_status, timeout=180,
interval=10):
msg = "Timed out waiting to become {}".format(expected_status)
utils.wait(
(lambda:
resource_client.get(resource).status == expected_status),
interval=interval,
timeout=timeout,
timeout_msg=msg
)
def determinate_components_names():
with open(utils.get_fixture(
"components_names.yaml", ("tests",))) as names_file:
components_names = yaml.load(names_file)
# TODO(rpromyshlennikov): temporary fix: ovs service was not included,
# because openvswitch-agent is managed by pacemaker
# ["neutron-openvswitch-agent", ""]
components = components_names["mk"]
return components
def get_all_dashboard_names_with_datasource():
get_all_names = influxdb_grafana_api_legacy_v2.get_all_grafana_dashboards_names
dashboards = {}
for datasource in ("influxdb", "prometheus"):
dashboards.update(
{name: datasource for name in get_all_names(datasource)})
return dashboards
@pytest.fixture(scope="module",
params=get_all_dashboard_names_with_datasource().items(),
ids=get_all_dashboard_names_with_datasource().keys())
def dashboard_fixture(request, grafana_datasources):
dash_name, datasource = request.param
if grafana_datasources[datasource] is None:
pytest.skip("No datasource client({}) for dashboard: {}".format(
datasource, dash_name))
return request.param
class TestFunctional(base_test.BaseLMATest):
def test_nova_metrics(self, os_clients, os_actions, influxdb_client):
"""Verify that the Nova metrics are collecting.
Scenario:
1. Create 3 new instances
2. Check Nova metrics in InfluxDB
Duration 5m
"""
time_started = "{}s".format(int(time.time()))
check_metrics = influxdb_client.get_instance_creation_time_metrics
metrics = check_metrics(time_started)
new_instance_count = 3
new_servers = []
for _ in range(new_instance_count):
new_servers.append(os_actions.create_basic_server())
total_instances = new_instance_count + len(metrics)
msg = ("There is a mismatch of instances in Nova metrics, "
"found less than {}".format(total_instances))
utils.wait(
(lambda: len(check_metrics(time_started)) == total_instances),
interval=10, timeout=180, timeout_msg=msg)
for server in new_servers:
os_clients.compute.servers.delete(server)
def test_nova_logs_in_elasticsearch(self, cluster, es_client):
"""Check that Nova logs are present in Elasticsearch
Scenario:
1. Query Nova logs are present in current Elasticsearch index
2. Check that Nova logs are collected from all controller and
compute nodes
Duration 5m
"""
output = es_client.query_elasticsearch(
query_filter="programname:nova*", size=50)
assert output['hits']['total'] != 0, "Indexes don't contain Nova logs"
controllers_hostnames = [controller.hostname for controller
in cluster.filter_by_role("controller")]
computes_hostnames = [compute.hostname for compute
in cluster.filter_by_role("compute")]
target_hostnames = set(controllers_hostnames + computes_hostnames)
actual_hostnames = set()
for host in target_hostnames:
host_presence = es_client.query_elasticsearch(
query_filter="programname:nova* AND Hostname:{}".format(host),
size=50)
if host_presence['hits']['total'] > 0:
actual_hostnames.add(host)
assert target_hostnames == actual_hostnames, (
"There are insufficient entries in elasticsearch")
assert es_client.query_elasticsearch(
query_filter="programname:nova* AND Hostname:mon01",
size=50)['hits']['total'] == 0, (
"There are logs collected from irrelevant host")
def test_nova_notifications(self, os_clients, os_actions, es_client):
"""Check that Nova notifications are present in Elasticsearch
Scenario:
1. Launch, update, rebuild, resize, power-off, power-on, snapshot,
suspend, shutdown, and delete an instance
2. Check that Nova notifications are present in current
Elasticsearch index
Duration 15m
"""
nova_event_types = [
"compute.instance.create.start", "compute.instance.create.end",
"compute.instance.delete.start", "compute.instance.delete.end",
"compute.instance.rebuild.start", "compute.instance.rebuild.end",
# NOTE(rpromyshlennikov):
# Disabled in favor of compatibility with Mk2x
# "compute.instance.rebuild.scheduled",
# "compute.instance.resize.prep.start",
# "compute.instance.resize.prep.end",
# "compute.instance.resize.confirm.start",
# "compute.instance.resize.confirm.end",
# "compute.instance.resize.revert.start",
# "compute.instance.resize.revert.end",
"compute.instance.exists",
# "compute.instance.update",
"compute.instance.shutdown.start", "compute.instance.shutdown.end",
"compute.instance.power_off.start",
"compute.instance.power_off.end",
"compute.instance.power_on.start", "compute.instance.power_on.end",
"compute.instance.snapshot.start", "compute.instance.snapshot.end",
# "compute.instance.resize.start", "compute.instance.resize.end",
# "compute.instance.finish_resize.start",
# "compute.instance.finish_resize.end",
"compute.instance.suspend.start", "compute.instance.suspend.end",
# "scheduler.select_destinations.start",
# "scheduler.select_destinations.end"
]
instance_event_types = nova_event_types[:-2]
instance = os_actions.create_basic_server()
logger.info("Update the instance")
os_clients.compute.servers.update(instance, name="test-server")
wait_for_resource_status(
os_clients.compute.servers, instance, "ACTIVE")
image = os_actions.get_cirros_image()
logger.info("Rebuild the instance")
os_clients.compute.servers.rebuild(
instance, image, name="rebuilded_instance")
wait_for_resource_status(
os_clients.compute.servers, instance, "ACTIVE")
# NOTE(rpromyshlennikov):
# Disabled in favor of compatibility with Mk2x
# logger.info("Resize the instance")
# flavors = os_clients.compute.flavors.list(sort_key="memory_mb")
# os_clients.compute.servers.resize(instance, flavors[1])
# wait_for_resource_status(
# os_clients.compute.servers, instance, "VERIFY_RESIZE")
# logger.info("Confirm the resize")
# os_clients.compute.servers.confirm_resize(instance)
# wait_for_resource_status(
# os_clients.compute.servers, instance, "ACTIVE")
# logger.info("Resize the instance")
# os_clients.compute.servers.resize(instance, flavors[2])
# wait_for_resource_status(
# os_clients.compute.servers, instance, "VERIFY_RESIZE")
# logger.info("Revert the resize")
# os_clients.compute.servers.revert_resize(instance)
# wait_for_resource_status(
# os_clients.compute.servers, instance, "ACTIVE")
logger.info("Stop the instance")
os_clients.compute.servers.stop(instance)
wait_for_resource_status(
os_clients.compute.servers, instance, "SHUTOFF")
logger.info("Start the instance")
os_clients.compute.servers.start(instance)
wait_for_resource_status(
os_clients.compute.servers, instance, "ACTIVE")
logger.info("Suspend the instance")
os_clients.compute.servers.suspend(instance)
wait_for_resource_status(
os_clients.compute.servers, instance, "SUSPENDED")
logger.info("Resume the instance")
os_clients.compute.servers.resume(instance)
wait_for_resource_status(
os_clients.compute.servers, instance, "ACTIVE")
logger.info("Create an instance snapshot")
snapshot = os_clients.compute.servers.create_image(
instance, "test-image")
wait_for_resource_status(
os_clients.compute.images, snapshot, "ACTIVE")
logger.info("Delete the instance")
os_clients.compute.servers.delete(instance)
logger.info("Check that the instance was deleted")
utils.wait(
lambda: instance.id not in os_clients.compute.servers.list()
)
es_client.check_notifications(
instance_event_types,
query_filter='instance_id:"{}"'.format(instance.id), size=500)
es_client.check_notifications(
nova_event_types,
query_filter="Logger:nova", size=500)
def test_glance_notifications(self, os_clients, es_client):
"""Check that Glance notifications are present in Elasticsearch
Scenario:
1. Create, update and delete image actions using Glance v2
2. Check that Glance notifications are present in current
Elasticsearch index
Duration 15m
"""
glance_event_types = ["image.create", "image.prepare", "image.upload",
"image.activate", "image.update", "image.delete"]
image_name = utils.rand_name("image-")
client = os_clients.image
image = client.images.create(
name=image_name,
container_format="bare",
disk_format="raw")
client.images.upload(image.id, "dummy_data")
wait_for_resource_status(client.images, image.id, "active")
prop = utils.rand_name("prop")
value_prop = utils.rand_name("value")
properties = '{0}: {1}'.format(prop, value_prop)
image = client.images.update(image.id, group_props=properties)
assert any(image[key] == properties for key in image) is True
client.images.delete(image.id)
utils.wait(
lambda: (image.id not in client.images.list())
)
es_client.check_notifications(
glance_event_types,
query_filter="Logger:glance", size=500)
def test_keystone_notifications(self, os_clients, es_client):
"""Check that Keystone notifications are present in Elasticsearch
Scenario:
1. Create user and authenticate with it to Horizon
2. Check that Keystone notifications are present in current
Elasticsearch index
Duration 15m
"""
keystone_event_types = [
"identity.role.created", "identity.role.deleted",
"identity.user.created", "identity.user.deleted",
"identity.project.created", "identity.project.deleted",
"identity.authenticate"
]
client = os_clients.auth
tenant = client.tenants.create(utils.rand_name("tenant-"))
password = "123456"
name = utils.rand_name("user-")
user = client.users.create(name, password, "test@test.com", tenant.id)
role = client.roles.create(utils.rand_name("role-"))
auth = client.tokens.authenticate(
username=user.name,
password=password,
tenant_id=tenant.id,
tenant_name=tenant.name
)
assert auth
client.roles.delete(role)
client.users.delete(user)
client.tenants.delete(tenant)
es_client.check_notifications(
keystone_event_types,
query_filter="Logger:keystone", size=500)
def test_heat_notifications(self, os_clients, os_actions, es_client):
"""Check that Heat notifications are present in Elasticsearch
Scenario:
1. Run Heat platform actions
2. Check that Heat notifications are present in current
Elasticsearch index
Duration 25m
"""
heat_event_types = [
# "orchestration.stack.check.start",
# "orchestration.stack.check.end",
"orchestration.stack.create.start",
"orchestration.stack.create.end",
"orchestration.stack.delete.start",
"orchestration.stack.delete.end",
# "orchestration.stack.resume.start",
# "orchestration.stack.resume.end",
# "orchestration.stack.rollback.start",
# "orchestration.stack.rollback.end",
# "orchestration.stack.suspend.start",
# "orchestration.stack.suspend.end"
]
name = utils.rand_name("heat-flavor-")
flavor = os_actions.create_flavor(name)
filepath = utils.get_fixture("heat_create_neutron_stack_template.yaml",
parent_dirs=("heat",))
with open(filepath) as template_file:
template = template_file.read()
parameters = {
'InstanceType': flavor.name,
'ImageId': os_actions.get_cirros_image().id,
'network': os_actions.get_internal_network()["id"],
}
stack = os_actions.create_stack(template, parameters=parameters)
# os_clients.orchestration.actions.suspend(stack.id)
# utils.wait(
# (lambda:
# os_clients.orchestration.stacks.get(
# stack.id).stack_status == "SUSPEND_COMPLETE"),
# interval=10,
# timeout=180,
# )
resources = os_clients.orchestration.resources.list(stack.id)
resource_server = [res for res in resources
if res.resource_type == "OS::Nova::Server"][0]
# instance = os_clients.compute.servers.get(
# resource_server.physical_resource_id)
# assert instance.status == "SUSPENDED"
#
# os_clients.orchestration.actions.resume(stack.id)
# utils.wait(
# (lambda:
# os_clients.orchestration.stacks.get(
# stack.id).stack_status == "RESUME_COMPLETE"),
# interval=10,
# timeout=180,
# )
instance = os_clients.compute.servers.get(
resource_server.physical_resource_id)
assert instance.status == "ACTIVE"
# os_clients.orchestration.actions.check(stack.id)
#
# utils.wait(
# (lambda:
# os_clients.orchestration.stacks.get(
# stack.id).stack_status == "CHECK_COMPLETE"),
# interval=10,
# timeout=180,
# )
os_clients.orchestration.stacks.delete(stack.id)
os_clients.compute.flavors.delete(flavor.id)
name = utils.rand_name("heat-flavor-")
extra_large_flavor = os_actions.create_flavor(name, 1048576)
parameters['InstanceType'] = extra_large_flavor.name
stack = os_actions.create_stack(
template, disable_rollback=False,
parameters=parameters, wait_active=False)
assert stack.stack_status == "CREATE_IN_PROGRESS"
utils.wait(
(lambda:
os_clients.orchestration.stacks.get(
stack.id).stack_status in (
"DELETE_COMPLETE", "ROLLBACK_COMPLETE")),
interval=10,
timeout=360,
)
resources = os_clients.orchestration.resources.list(stack.id)
resource_servers = [res for res in resources
if res.resource_type == "OS::Nova::Server"]
assert (not resource_servers or
resource_servers[0].physical_resource_id == "")
os_clients.compute.flavors.delete(extra_large_flavor.id)
es_client.check_notifications(
heat_event_types,
query_filter="Logger:heat", size=500)
def test_neutron_notifications(self, os_clients, os_actions, es_client):
"""Check that Neutron notifications are present in Elasticsearch
Scenario:
1. Create and delete some of neutron entities.
2. Check that Neutron notifications are present in current
Elasticsearch index
Duration 15m
"""
neutron_event_types = [
"subnet.delete.start", "subnet.delete.end",
"subnet.create.start", "subnet.create.end",
"security_group_rule.create.start",
"security_group_rule.create.end",
"security_group.delete.start", "security_group.delete.end",
"security_group.create.start", "security_group.create.end",
"router.update.start", "router.update.end",
# "router.interface.delete", "router.interface.create",
"router.delete.start", "router.delete.end",
"router.create.start", "router.create.end",
# "port.delete.start", "port.delete.end",
# "port.create.start", "port.create.end",
"network.delete.start", "network.delete.end",
"network.create.start", "network.create.end",
# "floatingip.update.start", "floatingip.update.end",
# "floatingip.delete.start", "floatingip.delete.end",
# "floatingip.create.start", "floatingip.create.end"
]
sec_group = os_actions.create_sec_group()
tenant_id = os_actions.get_admin_tenant().id
ext_net = os_actions.get_external_network()
net = os_actions.create_network(tenant_id)
subnet = os_actions.create_subnet(net, tenant_id)
router = os_actions.create_router(ext_net, tenant_id)
os_clients.network.add_interface_router(
router['id'], {'subnet_id': subnet['id']})
# server = os_actions.create_basic_server(
# net=net, sec_groups=[sec_group.name])
# floating_ips_pool = os_clients.compute.floating_ip_pools.list()
# floating_ip = os_clients.compute.floating_ips.create(
# pool=floating_ips_pool[0].name)
# os_clients.compute.servers.add_floating_ip(server, floating_ip)
# Clean
# os_clients.compute.servers.remove_floating_ip(
# server, floating_ip)
# os_clients.compute.floating_ips.delete(floating_ip)
# os_clients.compute.servers.delete(server)
os_clients.network.remove_gateway_router(router["id"])
os_clients.network.remove_interface_router(
router["id"], {"subnet_id": subnet['id']})
os_clients.network.delete_subnet(subnet['id'])
os_clients.network.delete_router(router['id'])
os_clients.network.delete_network(net['id'])
os_clients.compute.security_groups.delete(sec_group)
es_client.check_notifications(
neutron_event_types,
query_filter="Logger:neutron", size=500)
def test_cinder_notifications(self, os_clients, es_client):
"""Check that Cinder notifications are present in Elasticsearch
Scenario:
1. Create a volume and update it
2. Check that Cinder notifications are present in current
Elasticsearch index
Duration 15m
"""
cinder_event_types = ["volume.update.start", "volume.update.end"]
cinder = os_clients.volume
logger.info("Create a volume")
volume = cinder.volumes.create(size=1)
wait_for_resource_status(
os_clients.volume.volumes, volume.id, "available")
logger.info("Update the volume")
if cinder.version == 1:
cinder.volumes.update(volume, display_name="updated_volume")
else:
cinder.volumes.update(volume, name="updated_volume")
wait_for_resource_status(
os_clients.volume.volumes, volume.id, "available")
logger.info("Delete the volume")
cinder.volumes.delete(volume)
utils.wait(lambda: volume.id not in cinder.volumes.list())
es_client.check_notifications(
cinder_event_types,
query_filter='volume_id:"{}"'.format(volume.id), size=500)
@pytest.mark.parametrize(
"components", determinate_components_names().values(),
ids=determinate_components_names().keys())
@pytest.mark.parametrize(
"controllers_count", [1, 2], ids=["warning", "critical"])
def test_alert_service(self, destructive, cluster,
influxdb_client, nagios_client,
components, controllers_count):
"""Verify that the warning and critical alerts for services
show up in the Grafana and Nagios UI.
Scenario:
1. Connect to one (for warning) or two (for critical) of
the controller nodes using ssh and stop the nova-api service.
2. Wait for at least 1 minute.
3. On Grafana, check the following items:
- the box in the upper left corner of the dashboard
displays 'WARN' or 'CRIT' with an orange/red background,
- the API panels report 1/2 entity as down.
            4. Check count of haproxy backends with down state in InfluxDB,
               if the checked service has a haproxy backend.
5. Check email about service state.
6. Start the nova-api service.
7. Wait for at least 1 minute.
8. On Grafana, check the following items:
- the box in the upper left corner of the dashboard
displays 'OKAY' with an green background,
- the API panels report 0 entity as down.
9. Check count of haproxy backends with down state in InfluxDB.
10. Check email about service state.
11. Repeat steps 2 to 8 for the following services:
- Nova (stopping and starting the nova-api and
nova-scheduler)
- Cinder (stopping and starting the cinder-api and
cinder-scheduler services respectively).
- Neutron (stopping and starting the neutron-server
and neutron-openvswitch-agent services respectively).
- Glance (stopping and starting the glance-api service).
- Heat (stopping and starting the heat-api service).
- Keystone (stopping and starting the Apache service).
Duration 25m
"""
def verify_service_state_change(service_names, action, new_state,
service_state_in_influx,
down_backends_in_haproxy,):
logger.info("Changing state of service {0}. "
"New state is {1}".format(service_names[0], new_state))
for toolchain_node in toolchain_nodes:
toolchain_node.os.clear_local_mail()
for node in controller_nodes:
if action == "stop":
destructive.append(
lambda: node.os.manage_service(service_names[0],
"start"))
node.os.manage_service(service_names[0], action)
influxdb_client.check_cluster_status(
service_names[1], service_state_in_influx)
if service_names[3]:
influxdb_client.check_count_of_haproxy_backends(
service_names[3], expected_count=down_backends_in_haproxy)
nagios_client.wait_service_state_on_nagios(
{service_names[2]: new_state})
msg = (
"Mail check failed for service: {} "
"with new status: {}.".format(service_names[2], new_state))
utils.wait(
lambda: (
any(t_node.os.check_local_mail(service_names[2], new_state)
for t_node in toolchain_nodes)),
timeout=5 * 60, interval=15, timeout_msg=msg)
statuses = {1: (self.WARNING_STATUS, "WARNING"),
2: (self.CRITICAL_STATUS, "CRITICAL")}
name_in_influx, name_in_alerting, services = components
toolchain_nodes = cluster.filter_by_role("monitoring")
controller_nodes = cluster.filter_by_role(
"controller")[:controllers_count]
for (service, haproxy_backend) in services:
logger.info("Checking service {0}".format(service))
verify_service_state_change(
service_names=[
service,
name_in_influx,
name_in_alerting,
haproxy_backend],
action="stop",
new_state=statuses[controllers_count][1],
service_state_in_influx=statuses[controllers_count][0],
down_backends_in_haproxy=controllers_count,)
verify_service_state_change(
service_names=[
service,
name_in_influx,
name_in_alerting,
haproxy_backend],
action="start",
new_state="OK",
service_state_in_influx=self.OKAY_STATUS,
down_backends_in_haproxy=0,)
def test_grafana_dashboard_panel_queries(self, dashboard_fixture,
grafana_client):
"""Verify that the panels on dashboards show up in the Grafana UI.
Scenario:
1. Check queries for all panels of given dashboard in Grafana.
Duration 5m
"""
dashboard_name, datasource = dashboard_fixture
grafana_client.check_grafana_online()
dashboard = grafana_client.get_dashboard(dashboard_name, datasource)
result = dashboard.classify_all_dashboard_queries()
ok_panels, partially_ok_panels, no_table_panels, failed_panels = result
fail_msg = (
"Total OK: {len_ok}\n"
"No table: {no_table}\n"
"Total no table: {len_no}\n"
"Partially ok queries: {partially_ok}\n"
"Total partially ok: {len_partially_ok}\n"
"Failed queries: {failed}\n"
"Total failed: {len_fail}".format(
len_ok=len(ok_panels),
partially_ok=partially_ok_panels.items(),
len_partially_ok=len(partially_ok_panels),
no_table=no_table_panels.items(),
len_no=len(no_table_panels),
failed=failed_panels.items(),
len_fail=len(failed_panels))
)
assert (ok_panels and not
partially_ok_panels and not
no_table_panels and not
failed_panels), fail_msg
| UTF-8 | Python | false | false | 27,401 | py | 48 | test_functional.py | 43 | 0.589905 | 0.583701 | 0 | 650 | 41.155385 | 83 |
anasmoin18/music | 5,875,515,307,731 | 8601ac90091ebb8c6299db3d0c781f58a65d6aad | 93b2c916b13aba99efebd73fc067d54363659dd5 | /gaana/migrations/0001_initial.py | 0e8ad8ca8bcdac2d9f648de12f49b268f7b91239 | []
| no_license | https://github.com/anasmoin18/music | 2750ac0d0bfb0202cc342504005c6fc8ce031234 | 9766091839c9d5b0558282ea0099a2fd85b2f212 | refs/heads/master | 2021-05-04T10:55:42.496739 | 2016-02-28T08:24:13 | 2016-02-28T08:24:13 | 51,897,686 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-16 08:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30, verbose_name='first_name')),
('last_name', models.CharField(max_length=30, verbose_name='last_name')),
('date_of_birth', models.DateField()),
('city', models.CharField(max_length=60, null=True)),
('state_province', models.CharField(max_length=30, null=True)),
('country', models.CharField(max_length=50, null=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artist', models.CharField(max_length=30)),
('album', models.CharField(max_length=30)),
('title', models.CharField(max_length=30)),
('genre', models.CharField(max_length=30)),
('size', models.IntegerField(blank=True, default=0)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=32)),
],
),
migrations.AddField(
model_name='myuser',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='gaana.User'),
),
]
| UTF-8 | Python | false | false | 1,995 | py | 9 | 0001_initial.py | 7 | 0.551378 | 0.532832 | 0 | 52 | 37.365385 | 114 |
ayankelevich/boto3Samples | 12,996,571,060,732 | a76fe3b211d6bd4f40caf0cab9590301efa7a605 | de7e81a332a04969ce3f324cae9a5d54abebd81c | /connection/exceptions.py | 313c0c316983ca8e8d4b9290c0af2a6216eb5fe1 | []
| no_license | https://github.com/ayankelevich/boto3Samples | 35e96de981be27449fcce3219d42e65341cf8667 | 5cd7972e4e470af41152d21c69c27498cb1498ad | refs/heads/master | 2020-12-04T17:25:45.263714 | 2020-03-21T20:54:52 | 2020-03-21T20:54:52 | 231,851,659 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import botocore  # not needed if you reference exceptions as boto3.exceptions.botocore.exceptions
# import boto3333 # test to get exception name
try:
import boto3
except ModuleNotFoundError as e: # Python exception, not boto3
print("how do I know the name of the exception? (see comment below)")
print(e)
sys.exit(1) # no reason to continue, if boto3 is not installed
except Exception as e:
print(e)
# To find the actual exception name, like ModuleNotFoundError, do not intercept it with try:except and see the error
print(dir(boto3.exceptions))
print(dir(boto3.exceptions.botocore.exceptions))
# print(dir(botocore.exceptions))
try:
my_session = boto3.session.Session(profile_name='bogus')
except botocore.exceptions.ProfileNotFound as e:
print(e)
print(dir(e)) # does not have response object as shown in next example
    sys.exit(2)  # no reason to continue if the AWS profile is not found
try:
iam_console_resource = my_session.resource(service_name='iam')
for user in iam_console_resource.users.all():
print(user)
except botocore.exceptions.ClientError as e:
print(e)
print(e.response)
print(dir(e)) # has response object
print(e.response['Error'])
print(e.response['Error']['Code'])
sys.exit(2)
| UTF-8 | Python | false | false | 1,305 | py | 10 | exceptions.py | 10 | 0.711877 | 0.700383 | 0 | 39 | 32.461538 | 116 |
TimothyBarus/python-backend-template | 2,911,987,865,576 | 04cbdea1c68d93402687e36828e49b794272539c | 2c80642754e96fd76b34eed736e4adab1b606772 | /alvinchow_backend_protobuf/alvinchow_backend_pb2_grpc.py | 4df51f327822a8a6c8f8c2ee89592bf1452a9431 | [
"MIT"
]
| permissive | https://github.com/TimothyBarus/python-backend-template | 50966a9e95f43e40e0947f31019eac9bd4b3e96a | 46c07d733d68bc8682afd8510a17bc2aa360c606 | refs/heads/master | 2023-05-10T07:00:53.360920 | 2021-02-01T20:59:02 | 2021-02-01T20:59:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from alvinchow_backend_protobuf import common_pb2 as alvinchow__backend__protobuf_dot_common__pb2
from alvinchow_backend_protobuf import foo_pb2 as alvinchow__backend__protobuf_dot_foo__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class AlvinChowServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Ping = channel.unary_unary(
'/alvinchow_backend.AlvinChowService/Ping',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=alvinchow__backend__protobuf_dot_common__pb2.SimpleResponse.FromString,
)
self.GetFoo = channel.unary_unary(
'/alvinchow_backend.AlvinChowService/GetFoo',
request_serializer=alvinchow__backend__protobuf_dot_common__pb2.IdRequest.SerializeToString,
response_deserializer=alvinchow__backend__protobuf_dot_foo__pb2.Foo.FromString,
)
class AlvinChowServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def Ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFoo(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AlvinChowServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=alvinchow__backend__protobuf_dot_common__pb2.SimpleResponse.SerializeToString,
),
'GetFoo': grpc.unary_unary_rpc_method_handler(
servicer.GetFoo,
request_deserializer=alvinchow__backend__protobuf_dot_common__pb2.IdRequest.FromString,
response_serializer=alvinchow__backend__protobuf_dot_foo__pb2.Foo.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'alvinchow_backend.AlvinChowService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AlvinChowService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/alvinchow_backend.AlvinChowService/Ping',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
alvinchow__backend__protobuf_dot_common__pb2.SimpleResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetFoo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/alvinchow_backend.AlvinChowService/GetFoo',
alvinchow__backend__protobuf_dot_common__pb2.IdRequest.SerializeToString,
alvinchow__backend__protobuf_dot_foo__pb2.Foo.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| UTF-8 | Python | false | false | 4,449 | py | 129 | alvinchow_backend_pb2_grpc.py | 97 | 0.656102 | 0.652057 | 0 | 101 | 43.049505 | 118 |
KaiHe-better/CQARE | 10,376,640,998,205 | 8eac7bc68e113a08796e5a2ebcfa46619738b851 | e1136a9c73c2b987c1814ea6445214817de27ebf | /FSL/fewshot_re_kit/sentence_encoder/cnn_encoder.py | a77697f7b139031fdaeb593d15945311706351b8 | []
| no_license | https://github.com/KaiHe-better/CQARE | acf1e7fa3e0199fbee44701189ad1208fcc591cd | 380807a6d34f0140d10d0f17c8b8f0a8dfb7cbc3 | refs/heads/main | 2023-08-19T13:33:59.902558 | 2022-11-16T03:22:36 | 2022-11-16T03:22:36 | 411,121,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import numpy as np
from .. import network
class CNNSentenceEncoder(nn.Module):
def __init__(self, word_vec_mat, word2id, max_length, word_embedding_dim=50,
pos_embedding_dim=5, hidden_size=230):
nn.Module.__init__(self)
self.hidden_size = hidden_size
self.max_length = max_length
self.embedding = network.embedding.Embedding(word_vec_mat, max_length,
word_embedding_dim, pos_embedding_dim)
self.encoder = network.encoder.Encoder(max_length, word_embedding_dim,
pos_embedding_dim, hidden_size)
self.word2id = word2id
def forward(self, inputs):
x = self.embedding(inputs)
x = self.encoder(x)
return x
def tokenize(self, raw_tokens, pos_head, pos_tail):
# token -> index
indexed_tokens = []
for token in raw_tokens:
token = token.lower()
if token in self.word2id:
indexed_tokens.append(self.word2id[token])
else:
indexed_tokens.append(self.word2id['[UNK]'])
# padding
while len(indexed_tokens) < self.max_length:
indexed_tokens.append(self.word2id['[PAD]'])
indexed_tokens = indexed_tokens[:self.max_length]
# pos
pos1 = np.zeros((self.max_length), dtype=np.int32)
pos2 = np.zeros((self.max_length), dtype=np.int32)
pos1_in_index = min(self.max_length, pos_head[0])
pos2_in_index = min(self.max_length, pos_tail[0])
for i in range(self.max_length):
pos1[i] = i - pos1_in_index + self.max_length
pos2[i] = i - pos2_in_index + self.max_length
# mask
mask = np.zeros((self.max_length), dtype=np.int32)
mask[:len(indexed_tokens)] = 1
return indexed_tokens, pos1, pos2, mask
class Classifier(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, num_classes, input_dim=768, inner_dim=200, pooler_dropout=0.2):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = nn.ReLU()
self.dropout = nn.Dropout(pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def init_param(self):
        # xavier_normal_ expects a tensor, not a module, so initialise the
        # weights of the two linear layers explicitly.
        nn.init.xavier_normal_(self.dense.weight)
        nn.init.xavier_normal_(self.out_proj.weight)
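def _classifier_smoke_test():
    # Added illustration (not part of the original project): run the classification
    # head on a random batch of 768-dim sentence embeddings; the shapes used here
    # are assumptions, not values taken from the original code.
    head = Classifier(num_classes=5)
    logits = head(torch.randn(4, 768))
    assert logits.shape == (4, 5)
    return logits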
| UTF-8 | Python | false | false | 2,598 | py | 34 | cnn_encoder.py | 30 | 0.572748 | 0.557352 | 0 | 74 | 34.108108 | 91 |
KartinJulia/walkipon_backend | 7,387,343,788,694 | 23a2ab2a01ba3de946d1b976acb89b2f45ff806d | df3eef46f80603dabdc3c22f98a68eaa426bf9d7 | /test/test_personal_user_query_coupon_message_by_location.py | e402d7553d388706c40dc6723cf6ff4eab77eea2 | []
| no_license | https://github.com/KartinJulia/walkipon_backend | 02937a6890f805542f49e959854ef4e2765828be | 4d92a752dad21a68b95ea2217a24999510dd9441 | refs/heads/master | 2020-12-27T20:57:39.980856 | 2020-02-03T22:13:25 | 2020-02-03T22:13:25 | 238,051,487 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import requests
from requests import Request
import ast
import logging
try:
import http.client as http_client
except ImportError:
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
url = 'http://127.0.0.1:8000/v0/users/2/coupon_messages/locations'
payload = {
    'latitude': '38.1967403',
    'longitude': '-85.7305546',
    # radius, offset and limit were left blank in the original request;
    # None keeps the dict syntactically valid until real values are supplied.
    'radius': None,
    'offset': None,
    'limit': None,
}
cookie = {'sessionid': 'oxybqlkxreqkpt8izq93jvtfsfist5q0'}
headers = {
'Authorization': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik5qTTNRVE14TTBaRVJEYzJRVEpGUlVORE9UaEROa1ZDUmpnek5UWTBRVFZHTXpJMk4wTkVNZyJ9.eyJpc3MiOiJodHRwczovL2RyZWFtZXIyMDE4LXByb3RvdHlwZS5hdXRoMC5jb20vIiwic3ViIjoiYXV0aDB8NWJhYzMzMmM5NjMxYTMzMjFkNGY0MjM0IiwiYXVkIjoiVGVzdCBBUEkiLCJpYXQiOjE1MzgwMTE5NzksImV4cCI6MTUzODA5ODM3OSwiYXpwIjoidlphX3hzZGd3QWFyMEtpUDlteS1HM2RzS3Q1a0J1QlkiLCJndHkiOiJwYXNzd29yZCJ9.SGQ6oX5qltTzgAihWPKqYhVWdRl60FC7JIWWSJSvzUQCz4qN223dooM9OLZ1tSpka-VpUXpork_nHfcp4hFbYPhyp9htS8-Og9eeyqxMzv75_Y75oC76ueWXNqAgZ8d-NTcpx57VNbCTMsKUIoMdPAi5QwjMgxWTjo5kCB-dqWCXVX9qfs-nJ2QbHf3xbsMa6XtI4SVPdOiblT07MEl1tghcr-TVhcyax5JaOsLAMxOaLwn0xRAw7FjOQqR5MvWtkQn7tgaR_Pin8BPMgQKWSwfxzJ09YLbdi-ifTu73Gnc8iGy0n_vzXJHkGZPJncMDhDe4fWU_YoiYFJ7mrgvFUQ'
}
r = requests.post(
url,
cookies = cookie,
    data = payload,
headers = headers,
)
#r.json()
print(r.text) | UTF-8 | Python | false | false | 1,723 | py | 70 | test_personal_user_query_coupon_message_by_location.py | 66 | 0.78816 | 0.713871 | 0 | 44 | 38.181818 | 768 |
bingli8802/leetcode | 12,747,462,975,378 | 17c6035fff17469ebd9c3828d295be7c932a2758 | 4e02d5b0b1b0739553fd40bbbdfb0d02c9830350 | /0077_Combinations.py | 0d371da85136f82a064d5d1912c2e037bd2337aa | []
| no_license | https://github.com/bingli8802/leetcode | b039ab6af62f0c8992463393f561caafd21056e6 | a509b383a42f54313970168d9faa11f088f18708 | refs/heads/master | 2023-03-29T03:11:45.801090 | 2021-03-23T22:55:16 | 2021-03-23T22:55:16 | 279,321,659 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
    # A classic backtracking problem, written with Dong-ge's standard backtracking template
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
res = []
if not k or not n:
            return []  # keep the List[List[int]] return type even for empty input
def backTrack(m, tmp):
if len(tmp) == k:
res.append(tmp)
return
for i in range(m+1, n+1):
backTrack(i, tmp + [i])
backTrack(0, [])
return res
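# Usage sketch (added for illustration):
#   Solution().combine(4, 2)
#   -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]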
| UTF-8 | Python | false | false | 493 | py | 319 | 0077_Combinations.py | 319 | 0.403397 | 0.397028 | 0 | 19 | 23.789474 | 39 |
walter-bd/scikit-decide | 12,841,952,263,939 | 1d40efe65b249e67bd8b86ba9ad6a305ab6a315e | 2d14aa082e33f3c9d2344ea6811a5b18ec906607 | /examples/discrete_optimization/rcpsp_multiskill_parser_example.py | 74a36b3e789e0601be6e79fdb2616ffdff450a61 | [
"MIT"
]
| permissive | https://github.com/walter-bd/scikit-decide | 4c0b54b7b2abdf396121cd256d1f931f0539d1bf | d4c5ae70cbe8b4c943eafa8439348291ed07dec1 | refs/heads/master | 2023-07-30T14:14:28.886267 | 2021-08-30T14:16:30 | 2021-09-03T06:46:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Utility functions to parse the Multiskill-RCPSP data files used in other example scripts.
from __future__ import annotations
import os
path_to_data =\
os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/rcpsp_multiskill/dataset_def/")
def get_data_available_ms():
files = [f for f in os.listdir(path_to_data) if "pk" not in f and "json" not in f]
return [os.path.join(path_to_data, f) for f in files]
def get_complete_path_ms(root_path: str)->str: # example root_path="100_5_20_9_D3.def"
l = [f for f in get_data_available_ms() if root_path in f]
if len(l) > 0:
return l[0]
return None
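def example_usage_ms():
    # Added illustration (not part of the original module): list the available
    # instance files and resolve one by (partial) name; it assumes the dataset
    # .def files are present under data/rcpsp_multiskill/dataset_def/.
    files = get_data_available_ms()
    one_instance = get_complete_path_ms("100_5_20_9_D3.def")
    return files, one_instance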
| UTF-8 | Python | false | false | 808 | py | 280 | rcpsp_multiskill_parser_example.py | 242 | 0.688119 | 0.675743 | 0 | 23 | 33.956522 | 98 |
BonneelP/pyleecan | 6,219,112,695,546 | f6c6f51e044bb766962fd2e8d09ff3c6ee4f4a1f | 645a531f5470c4b5dcde2167361621da4a10737f | /Tests/Methods/Slot/test_VentilationCirc.py | df74b2d5e6fc9e8313a8e52b00471f145d173c7b | [
"Apache-2.0"
]
| permissive | https://github.com/BonneelP/pyleecan | f231940c35ac07861f031ec5386e39b60065a6c6 | 29e6b4358420754993af1a43048aa12d1538774e | refs/heads/master | 2023-01-05T07:55:58.152343 | 2020-11-06T12:21:33 | 2020-11-06T12:21:33 | 255,649,502 | 2 | 0 | NOASSERTION | true | 2020-04-14T15:35:02 | 2020-04-14T15:35:02 | 2020-04-14T14:24:40 | 2020-04-14T14:24:31 | 11,372 | 0 | 0 | 0 | null | false | false | # -*- coding: utf-8 -*-
from os.path import join
import pytest
from pyleecan.Classes.VentilationCirc import VentilationCirc
from pyleecan.Methods.Slot.VentilationCirc.build_geometry import (
CircleBuildGeometryError,
)
"""unittest for VentilationCirc"""
@pytest.mark.METHODS
class Test_VentilationCirc(object):
@pytest.fixture
def vent(self):
"""Run at the begining of every test to setup the VentilationCirc"""
test_obj = VentilationCirc(Zh=8, Alpha0=0, D0=5e-3, H0=40e-3)
return test_obj
def test_build_geometry_errors(self, vent):
"""Test that build_geometry can raise some errors"""
with pytest.raises(CircleBuildGeometryError) as context:
vent.build_geometry(sym=0.2)
with pytest.raises(CircleBuildGeometryError) as context:
vent.build_geometry(sym=1, alpha="dz")
with pytest.raises(CircleBuildGeometryError) as context:
vent.build_geometry(sym=1, alpha=1, delta="dz")
def test_build_geometry(self, vent):
"""Test that build_geometry works"""
result = vent.build_geometry()
assert len(result) == 8
| UTF-8 | Python | false | false | 1,149 | py | 236 | test_VentilationCirc.py | 187 | 0.678851 | 0.664056 | 0 | 37 | 30.054054 | 76 |
ramiab/dev | 19,069,654,800,070 | ea333be9f043ecb4ebbcd884e6111ed65f759646 | 6c8eed277d9cdeb83a88bb993bbf3315f8af55f9 | /py/djcode/mysite/pocs_linked_list/linked_list.py | 8d6094b6b73fc8ffaad53196ad69af92c0950894 | []
| no_license | https://github.com/ramiab/dev | 7a7ad00b2ebf7572d2e4fbc4090362bb29b8b825 | 8b53039017c3338ecf12c1b5241df811054e0db2 | refs/heads/master | 2020-12-24T13:17:48.263752 | 2014-02-12T21:24:09 | 2014-02-12T21:24:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from apt_pkg import init
__author__ = 'rami'
class Node(object):
def __init__( self, data, next ):
self.data = data
self.next = next
def add_before(self, value):
new_node = Node(value, self)
return new_node
def __repr__(self):
        return '%s--%s' % (self.data, self.next)
def __str__(self):
return '%s' % self.data
def get_print_list_str(node, prefix=''):
if not node:
return '%s --> --||' % prefix
else:
prefix = '%s --> %s ' % (prefix, node)
return get_print_list_str( node.next, prefix)
def reverse_list_recursively( prev, node ):
if not node:
return prev
else:
new_head = reverse_list_recursively(node, node.next)
node.next = prev
return new_head
def reverse_list_iteratively( head ):
cur = head
prev = None
while cur:
next = cur.next
cur.next = prev
prev = cur
cur = next
head = prev
return head
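if __name__ == '__main__':
    # Small demo added for illustration (not in the original file): build the
    # chain 1 -> 2 -> 3 with add_before, print it, then reverse it both ways.
    head = Node(3, None).add_before(2).add_before(1)
    print(get_print_list_str(head))
    head = reverse_list_recursively(None, head)   # now 3 -> 2 -> 1
    print(get_print_list_str(head))
    head = reverse_list_iteratively(head)         # back to 1 -> 2 -> 3
    print(get_print_list_str(head))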
| UTF-8 | Python | false | false | 997 | py | 13 | linked_list.py | 12 | 0.538616 | 0.538616 | 0 | 46 | 20.586957 | 60 |
CloudBoltSoftware/cloudbolt-forge | 8,160,437,909,158 | f04156e3be3be346ed560b9466c9a02e4e91b2cc | b6d8049568e8068422bc4ac2b957972dc1ee0ab7 | /blueprints/openstack/create_instance_from_volume.py | cd90cd63d4572ed383442f57c81cbc9f09f722b5 | [
"Apache-2.0"
]
| permissive | https://github.com/CloudBoltSoftware/cloudbolt-forge | a6dffd52bd074ad48a61527502fcddf8113508ff | a5584a84841be49bea69df506a91d18cb3f215d8 | refs/heads/master | 2023-08-11T08:08:07.730807 | 2023-08-10T11:40:07 | 2023-08-10T11:40:07 | 39,207,603 | 37 | 44 | Apache-2.0 | false | 2023-09-14T20:06:44 | 2015-07-16T16:18:02 | 2023-09-11T19:08:50 | 2023-09-14T20:06:43 | 12,102 | 36 | 32 | 16 | Python | false | false | import openstack
from openstack import connection
from infrastructure.models import CustomField, Environment
from utilities.models import ConnectionInfo
from resourcehandlers.openstack.models import OpenStackHandler
from infrastructure.models import CustomField
from common.methods import set_progress
CONN = ConnectionInfo.objects.get(name='Openstack')
assert isinstance(CONN, ConnectionInfo)
AUTH_URL = '{}://{}:{}/v3'.format(CONN.protocol, CONN.ip, CONN.port)
def generate_creds():
rh = OpenStackHandler.objects.first()
conn = connection.Connection(
region_name='RegionOne',
auth=dict(auth_url= AUTH_URL,
username=rh.serviceaccount, password=rh.servicepasswd,
project_id=rh.project_id,
user_domain_id=rh.domain),
compute_api_version='2.1', identity_interface='public', verify=False)
return conn
def generate_options_for_volume(server=None, **kwargs):
conn = generate_creds()
volumes = conn.list_volumes()
return [(vol.name) for vol in volumes]
def generate_options_for_image(server=None, **kwargs):
conn = generate_creds()
images = conn.list_images()
return [(image.name) for image in images]
def generate_options_for_flavor(server=None, **kwargs):
conn = generate_creds()
flavors = conn.list_flavors()
return [(flavor.name) for flavor in flavors]
def generate_options_for_network(server=None, **kwargs):
conn = generate_creds()
networks = conn.list_networks()
return [(net.name) for net in networks]
def generate_options_for_security_group(server=None, **kwargs):
conn = generate_creds()
groups = conn.list_security_groups()
return [(group.name) for group in groups]
def generate_options_for_key_pair_name(server=None, **kwargs):
conn = generate_creds()
keys = conn.list_keypairs()
return [(key.name) for key in keys]
def generate_options_for_availability_zone(server=None, **kwargs):
conn = generate_creds()
zones = conn.list_availability_zone_names()
return [(zone) for zone in zones]
def generate_options_for_floating_ips(**kwargs):
conn = generate_creds()
floating_ips = conn.list_floating_ips()
return [(ip.floating_ip_address) for ip in floating_ips]
def generate_options_for_cloudbolt_environment(server=None, **kwargs):
envs = Environment.objects.filter(
resource_handler__resource_technology__name="Openstack").values("id", "name")
return [(env['id'], env['name']) for env in envs]
def get_floating_ip_pool(**kwargs):
conn = generate_creds()
pool_list = conn.list_floating_ip_pools()
return [p['name'] for p in pool_list]
def create_required_custom_fields():
CustomField.objects.get_or_create(
name='env_id',
defaults={
'label': 'Environment ID',
'description': 'Used by Openstack blueprint',
'type': 'INT',
}
)
CustomField.objects.get_or_create(
name='server_name', defaults={
'label': 'Instance name', 'type': 'STR',
'description': 'Name of the openstack BP instance',
}
)
CustomField.objects.get_or_create(
name='volume', defaults={
'label': 'volume name', 'type': 'STR',
'description': 'Name of the openstack bootable volume',
}
)
def run(job, logger=None, **kwargs):
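    # Overall flow: read the plugin inputs rendered by the option generators
    # above, create a server from the chosen bootable volume through the
    # OpenStack SDK connection, wait for it to come up with the floating IP,
    # then record the chosen values as custom fields on the CloudBolt resource.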
resource = kwargs.get('resource')
env_id = '{{ cloudbolt_environment }}'
env = Environment.objects.get(id=env_id)
env.resource_handler.cast()
name = '{{ hostname }}'
volume = '{{ volume }}'
flavor = '{{ flavor }}'
network = '{{ network }}'
security_group = '{{ security_group }}'
key_name = '{{ key_pair_name }}'
availability_zone = '{{ availability_zone }}'
floating_ip = '{{ floating_ips }}'
pool = get_floating_ip_pool()
conn = generate_creds()
server = conn.create_server(
name=name,
flavor=flavor,
boot_volume=volume,
network=network,
key_name=key_name,
security_groups=security_group,
)
conn.wait_for_server(server, ips=floating_ip, ip_pool=pool, timeout=180)
set_progress("Creating OpenStack node '{}' from boot volume '{}'".format(
name, volume)
)
# Save cluster data on the resource
create_required_custom_fields()
resource = kwargs['resource']
resource.env_id = env_id
resource.server_name = name
resource.volume = volume
resource.name = name
resource.save()
return "SUCCESS","Openstack instance created with name '{}',flavor '{}', network '{}', " \
"key pair '{}', boot volume '{}', floating ip '{}', security_group(s) '{}'".format(
name, flavor, network, key_name, volume, floating_ip, security_group), ""
| UTF-8 | Python | false | false | 4,805 | py | 656 | create_instance_from_volume.py | 475 | 0.64204 | 0.640791 | 0 | 150 | 31.033333 | 104 |
JMine97/ProblemSolvingByPy | 3,891,240,391,538 | 4029858f0b3e173b049c9d2ca4f2b158991a5fca | 8757819881b8f0a8d3f1198f86e1dbbf797d5bb9 | /week9/gyuri/10868번_최솟값.py | 6aedcf3f762a1ddc1ae88f11bc216e6907e8f14d | []
| no_license | https://github.com/JMine97/ProblemSolvingByPy | 122799a68910e38d57397f119816b235cc1c498b | 8a35dc11e523af9cc75cfd67f0136cf03e06f7cf | refs/heads/main | 2023-06-19T04:24:21.127921 | 2021-07-16T12:09:46 | 2021-07-16T12:09:46 | 387,479,666 | 1 | 0 | null | true | 2021-07-19T13:47:03 | 2021-07-19T13:47:02 | 2021-07-16T12:09:48 | 2021-07-16T12:09:46 | 378 | 0 | 0 | 0 | null | false | false | #10868번_최솟값.py
'''
N integers are given.
Then M pairs (a, b) follow;
e.g. (1, 3) -> the minimum among the 1st, 2nd and 3rd integers
'''
import sys
from math import *
input = sys.stdin.readline
INF = int(1e9)
# Build a segment tree whose root node holds the minimum value
def init(node, s, e):
if s == e:
tree[node] = num_list[s]
return tree[node]
mid = (s+e) // 2
tree[node] = min(init(node*2, s, mid), init(node*2+1, mid+1, e))
return tree[node]
def min_function(node, s, e, l, r):
if r < s or e < l:
return INF
    # The node's segment [s, e] lies completely inside the query range [l, r],
    # so return its precomputed minimum without recursing further
if l <= s and e <= r:
return tree[node]
mid = (s+e) // 2
return min(min_function(node*2, s, mid, l, r), min_function(node*2+1, mid+1, e, l, r))
answer = []
n, m = map(int, input().split())
# Compute the height of the tree over the list and round up.
h = int(ceil(log2(n)))
# n_size = 2 ** (h+1)
# Shift 1 left by h + 1 bits, i.e. 2 ** (h + 1)
t_size = 1 << (h+1)
tree = [0] * t_size
num_list = []
for _ in range(n):
num_list.append(int(input()))
init(1, 0, n-1)
for _ in range(m):
a, b = map(int, input().split())
answer.append(min_function(1,0,n-1,a-1,b-1))
for i in answer:
print(i)
| UTF-8 | Python | false | false | 1,273 | py | 248 | 10868번_최솟값.py | 247 | 0.538462 | 0.502262 | 0 | 51 | 20.666667 | 90 |
edoardottt/py-problems-solutions | 17,755,394,826,652 | 0fd6be67a10375469c5ee7f440a407a159765156 | a3798727d0af116d32ecbc80cc56e46a759d065a | /BackTracking/k_subset_generator.py | c435fb5ba62eb5752bf82c6dd706bd589b2f0efe | []
| no_license | https://github.com/edoardottt/py-problems-solutions | 39ae7e9ccb223fcc938c32e6fcad3e479f669df1 | 5f92341035b5a8dcd18508df30c732180b5c2f72 | refs/heads/master | 2023-03-03T09:53:53.931559 | 2023-02-27T07:17:10 | 2023-02-27T07:17:10 | 192,494,101 | 28 | 8 | null | false | 2019-07-15T19:00:55 | 2019-06-18T08:02:13 | 2019-07-15T09:35:39 | 2019-07-15T19:00:55 | 27 | 4 | 0 | 0 | Python | false | false | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 10:08:31 2019
@author: edoardottt
"""
# k-SUBSET GENERATOR
S = [0, 1, 2, 3, 4]
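# Backtracking over binary strings S of length n containing exactly k ones:
# i is the position currently being decided and x the number of ones placed
# so far.  A 0 is allowed only if enough positions remain to still reach k
# ones (x >= k - n + i + 1); a 1 is allowed only while x < k.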
def ksott(n, i, k, x):
if i == n:
print(S)
if x >= k - n + i + 1:
S[i] = 0
ksott(n, i + 1, k, x)
if x < k:
S[i] = 1
ksott(n, i + 1, k, x + 1)
ksott(5, 0, 3, 0)
| UTF-8 | Python | false | false | 370 | py | 28 | k_subset_generator.py | 27 | 0.389189 | 0.313514 | 0 | 23 | 14.086957 | 35 |
phaustin/pythonlibs | 5,995,774,367,941 | 92436d911e69252f9a4163dc6acb7061e497a7ed | 1d61057dab56fddd1edba78ce10b3a5f96c18c60 | /diskinventory/readlist.py | 088ea5f5b45d707354966226b2d2cc3d44897b70 | []
| no_license | https://github.com/phaustin/pythonlibs | f4f1f450d3e9bb8ebac5ffdb834d3750d80ee38e | 35ed2675a734c9c63da15faf5386dc46c52c87c6 | refs/heads/master | 2022-04-29T00:45:34.428514 | 2022-03-06T17:16:30 | 2022-03-06T17:16:30 | 96,244,588 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
#on osx
#/usr/local/Cellar/coreutils/8.22/libexec/gnubin/ls
requires an ls listing text file generated by, for example
ls -R -l -Q --time-style=full-iso --time=status /home/phil/* > listing.txt (file size in bytes)
example
python readlist.py ls_list.txt --files_db ls_list.db (default)
"""
import re,os
import dateutil.parser as du
from pytz import timezone
import datetime as dt
from pandas import DataFrame, Series
import dataset, site
import argparse, textwrap
home_dir=os.getenv('HOME')
site.addsitedir('%s/repos' % home_dir)
from pythonlibs.pyutils.silent_remove import silent_remove
linebreaks=argparse.RawTextHelpFormatter
descrip=textwrap.dedent(globals()['__doc__'])
parser = argparse.ArgumentParser(formatter_class=linebreaks,description=descrip)
parser.add_argument('filelist', nargs=1, type=str,help='filelist generated by ls')
parser.add_argument('--files_db', nargs='?', default='file_list.db',type=str,help='output filelist db')
args=parser.parse_args()
listfile=args.filelist[0]
dbname=args.files_db
dbstring='sqlite:///{:s}'.format(dbname)
silent_remove(dbname)
db = dataset.connect(dbstring)
table_name='files'
the_table = db.create_table(table_name)
the_table=db[table_name]
blanks=re.compile('\s+')
stripQuotes=re.compile('.*\"(.*)\".*')
getName=re.compile('(?P<left>.*)\"(?P<name>.*)\".*')
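# stripQuotes extracts the quoted directory name from a '"...":' header line;
# getName splits a long-listing line into everything to the left of the quoted
# file name (permissions, links, owner, group, size, date) and the name itself.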
columnNames=['permission','links','owner','theGroup','size','date','directory','name']
counter=0
with open(listfile) as f:
counter=0
fileList=[]
for the_line in f:
if counter % 10000 == 0:
print "linecount: ",counter
newline=the_line.strip()
if len(newline)>0:
if newline[-1]==":":
#"/Users/phil/www.physics.mcgill.ca/~gang/ftp.transfer":
dirname=stripQuotes.match(newline)
dirname=dirname.group(1)
continue
else:
test=getName.match(newline)
#-rw-r--r-- 1 phil users 0 2005-10-06 12:28:09.000000000 -0700 "/home/phil/eosc211_fall2005.txt~"
if test:
#
# skip directories and symbolic links
#
if test.group("left")[0] == 'd' or test.group("left")[0] == 'l':
continue
#
# check for a path name like /home/phil/eosc211_fall2005.txt
#
root,filename=os.path.split(test.group("name"))
if len(root) > 0:
dirname=root
else:
#
# we've got a plain file name
#
filename=test.group("name")
permission,links,owner,theGroup,size,date,time,offset =\
blanks.split(test.group("left").strip())
size=int(size)
string_date=" ".join([date,time,offset])
date_with_tz=du.parse(string_date)
date_utc = date_with_tz.astimezone(timezone('UTC'))
timestamp=int(date_utc.strftime('%s'))
#columnNames=['permission','links','owner','theGroup','size','date','directory','name']
out=(permission,links,owner,theGroup,size,timestamp,dirname,filename)
record=dict(zip(columnNames,out))
the_table.insert(record)
## print string_date
## print date_utc
## print dt.datetime.fromtimestamp(timestamp)
counter+=1
print counter
| UTF-8 | Python | false | false | 3,923 | py | 144 | readlist.py | 101 | 0.52154 | 0.50548 | 0 | 100 | 35.87 | 124 |
erchauhannitin/prod-reports | 18,743,237,312,332 | 6c96e894d8d868a81f6c3abe03814b943029cd64 | 831dad75f7048f3db1171ed5f52b11589e4db09f | /readobject.py | 60606f01e655943b80644eb3dd0e2fc9c0b80ee1 | []
| no_license | https://github.com/erchauhannitin/prod-reports | dc82239b246365d3d70bf203ddeb7d8628b327a2 | 1dae8d57253f7a1573b50a9875f8b1afc38dfdb4 | refs/heads/master | 2022-10-14T13:02:26.536313 | 2020-06-13T17:44:04 | 2020-06-13T17:44:04 | 272,039,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Entry(object):
def __init__(self, NAME, ID, ROLES, STATUS):
super(Entry, self).__init__()
self.NAME = NAME
self.ID = ID
self.ROLES = ROLES
self.STATUS = STATUS
self.entries = []
def __str__(self):
return self.ID + " " + self.ROLES + " " + self.STATUS
def __repr__(self):
return self.__str__()
entries = []
file = open('cluster.txt', 'r').readlines()
title = file.pop(0)
timeStamp = file.pop(0)
header = file.pop(0)
for line in file:
words = line.split()
NAME, ID, ROLES, STATUS = [i for i in words]
entry = Entry(NAME, ID, ROLES, STATUS)
entries.append(entry)
maxEntry = max(entries, key=lambda entry: entry.NAME)
entries.sort(key=lambda entry: entry.ID, reverse=True)
print(entries)
| UTF-8 | Python | false | false | 788 | py | 4 | readobject.py | 2 | 0.588832 | 0.585025 | 0 | 31 | 24.419355 | 61 |
gokudomatic/cobiv | 16,509,854,294,594 | 87a7c3973d02fec6e520761411f2618e79fea2cc | 854f9c1101772b30baa3999fa5461b62e75f1754 | /cobiv/modules/hud_components/progresshud/__init__.py | a5cf8025fc6cfda5d28c4a0fb8476a7418c4530b | [
"MIT"
]
| permissive | https://github.com/gokudomatic/cobiv | fd27aa46be8389909827eee5fcb838142c3511d6 | c095eda704fab319fccc04d43d8099f1e8327734 | refs/heads/master | 2020-04-05T10:14:35.361695 | 2018-04-11T16:46:04 | 2018-04-11T16:46:04 | 81,542,555 | 4 | 1 | null | false | 2017-10-01T20:18:38 | 2017-02-10T08:10:10 | 2017-02-10T10:55:33 | 2017-10-01T20:18:38 | 11,529 | 0 | 0 | 3 | Python | null | null | __all__ = ["ProgressHud"]
| UTF-8 | Python | false | false | 26 | py | 91 | __init__.py | 79 | 0.538462 | 0.538462 | 0 | 1 | 25 | 25 |
wych/blog-deploy | 249,108,110,538 | 5189d1ed1e09e34123b4132bbe0d29cec706efd3 | 2d5a762ca32162e399e9dd0845e1d4c993e1cc7b | /main.py | 54257a19879d16528aa6b163b06f2acfd1e0799d | [
"MIT"
]
| permissive | https://github.com/wych/blog-deploy | e83f7c26b0679f17cf5b9c987af6a52981be0570 | 17d2e16040437fea881162f1f31d035401fdf626 | refs/heads/master | 2021-07-10T20:32:39.520645 | 2020-02-12T17:01:01 | 2020-02-12T17:01:01 | 240,032,195 | 0 | 0 | MIT | false | 2021-03-20T02:51:58 | 2020-02-12T14:24:45 | 2020-02-12T17:01:33 | 2021-03-20T02:51:58 | 6 | 0 | 0 | 1 | Python | false | false | from flask import Flask, request
import hashlib
import hmac
import secrets
import os
import threading
from utils import *
app = Flask(__name__)
config_path = os.environ['DEPLOY_CONFIG'] if os.environ.get('DEPLOY_CONFIG') else 'config.toml'
conf = Config.parse(config_path)
repo = Repo(conf)
builder = Builder(conf)
builder.gen_static()
builder.deploy()
mutex = threading.Lock()
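# Serializes rebuilds when several webhook deliveries arrive close together.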
@app.route(conf.listen_url, methods=['POST'])
def up2date():
if not secrets.compare_digest(request.headers.get('X-Hub-Signature'), verify_signature(request.data, conf.secret_key)):
return 'bad signature', 401
def thread_task():
        with mutex:  # release the lock even if a rebuild step raises
            repo.update()
            builder.gen_static()
            builder.deploy()
threading.Thread(target=thread_task).start()
return 'ok', 200
def verify_signature(data: bytes, secret_key: bytes):
signature = 'sha1=' + hmac.new(secret_key, msg=data, digestmod=hashlib.sha1).hexdigest()
return signature
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True) | UTF-8 | Python | false | false | 1,077 | py | 5 | main.py | 2 | 0.675952 | 0.660167 | 0 | 40 | 25.95 | 123 |
kalselini/python_assignment_offwell.py | 11,123,965,337,229 | 1e044bdb9c01a97bbe783ef6dcc5c2027d0c2e0e | 44276081fbfd3778495d587d60e45a7911bd1ca5 | /crimes_listed.py | d5bf722597768b9108d6243ccf64c9e9042cb296 | []
| no_license | https://github.com/kalselini/python_assignment_offwell.py | b35147acdf0162b447d98e9a95b74e5de8bb84db | 784d187cba32e790255343251760d5b8a281b115 | refs/heads/master | 2020-04-28T11:35:30.410799 | 2019-03-19T14:21:18 | 2019-03-19T14:21:18 | 175,247,491 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import file_finder
import geodist
import csv
def crimes_listed( latlngB, chosen_date ):
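    # For every row of that month's CSV whose coordinates fall within a distance
    # of 36 (in the units returned by geodist.distance) of latlngB, print the
    # distance together with columns 6, 9 and 10 (location, crime type and
    # outcome, assuming the police.uk CSV layout).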
path = file_finder.find(chosen_date)
file = open(path,'rt')
with file as f:
csv_obj = csv.reader(f)
for line in csv_obj:
try:
lngA = line[4]
latA = line[5]
lngA = float(lngA)
latA = float(latA)
latlngA = (latA, lngA)
d = geodist.distance(latlngA, latlngB)
if d < 36:
crimes_listed = [d, line[6], line[9], line[10]]
print(crimes_listed)
except ValueError:
continue
#l = crimes_listed ( (50.71433865, -2.43619708), '2018-02')
| UTF-8 | Python | false | false | 766 | py | 5 | crimes_listed.py | 3 | 0.468668 | 0.425587 | 0 | 33 | 21.757576 | 67 |
lasofivec/slmp | 8,684,423,879,964 | db0ab96ac006ab1f6a37294e9c682958a06b088e | 7688465c3d00dd74c0410871c559f8e2d8e141c4 | /slmp/charac_feet.py | fbfbbab438df60115ce66cae51b647f46571322b | [
"MIT"
]
| permissive | https://github.com/lasofivec/slmp | fbeabe5dbd1448c67c9006407abd616c2d9954a3 | bda10f550aa61e7194bc200b66ed08dd3074e91f | refs/heads/master | 2021-06-28T02:08:42.961569 | 2020-12-22T17:02:35 | 2020-12-22T17:02:35 | 201,268,648 | 2 | 0 | null | false | 2020-12-22T17:02:11 | 2019-08-08T13:51:03 | 2020-12-22T15:44:41 | 2020-12-22T17:02:10 | 716 | 2 | 0 | 0 | Python | false | false | """
Module to compute characteristics' origin both in physical
and logical domain.
"""
from globals_variables import *
from geometry import geo
from geometry import npatchs
from geometry import jac
from geometry import X_mat, Y_mat
import connectivity as conn
def get_pat_char(eta1, eta2, advec, dt):
"""
Computes the characteristics in logical domain.
Note:
To compute characteristic's feet we have to
solve the following ODE :
d(Eta)/dt = 1/sqrtg rot(psi)
with i.c. Eta(s) = eta(0)
where
sqrtg = det(J) = det (Jacobian)
rot(Psi) = J^-1 * A * sqrtg
Args:
eta1: vector containing the coordinates of eta1
eta2: vector containing the coordinates of eta2
advec: advection coefficient for every direction, patch and point
dt: time step
Returns:
        char_eta1: 1st coordinate of origin of characteristic in logical domain
        char_eta2: 2nd coordinate of origin of characteristic in logical domain
"""
char_eta1 = np.zeros((npatchs, NPTS1, NPTS2))
char_eta2 = np.zeros((npatchs, NPTS1, NPTS2))
where_char = np.zeros((npatchs, NPTS1, NPTS2), dtype=np.int)
for npat, nrb in enumerate(geo):
print("")
print("For patch ", npat, " :")
eta1_mat, eta2_mat = my_meshgrid(eta1, eta2)
# Definition of right hand side of ODE :
rhs = lambda xatn, yatn: derivs_eta(geo, npat,
xatn, yatn,
[advec[0][npat], advec[1][npat]])
# We solve the ode using a Runge Kutta method of order 4
char_eta1[npat], char_eta2[npat], where_char[npat] \
= rk4(npat, eta1_mat, eta2_mat, dt, rhs)
print(" * char_eta1 max et min =", \
np.max(char_eta1[npat]), np.min(char_eta1[npat]))
print(" * char_eta2 max et min =", \
np.max(char_eta2[npat]), np.min(char_eta2[npat]))
return char_eta1, char_eta2, where_char
def rk4(npat, xn, yn, dt, rhs_func):
"""
Runge Kutta method of order 4 applied to ODE for
characteristics (see get_pat_char).
This function returns the results constrained to the patch.
No out of domain particle.
Args:
npat: patch index
xn: 1st coordinates at time tn
yn: 2nd coordinates at time tn
dt: time step
        rhs_func: function giving the RHS of the ODE
    Returns:
        xnp1, ynp1: coordinates of the characteristics' origins
        where_char: index of the patch containing each origin
"""
# Runge-Kutta Method of order 4 :
k1x, k1y = rhs_func(xn, yn)
advec_x = 0.5 * dt * k1x
advec_y = 0.5 * dt * k1y
xn_05 = xn - advec_x
yn_05 = yn - advec_y
last_advec_percent = np.zeros_like(xn)
where_char = np.zeros_like(xn, dtype=np.int) + npat
contain_particles(xn, yn,
[advec_x, advec_y],
xn_05, yn_05,
where_char, last_advec_percent)
k2x, k2y = rhs_func(xn_05, yn_05)
advec_x = 0.5 * dt * k2x
advec_y = 0.5 * dt * k2y
xn_15 = xn - advec_x
yn_15 = yn - advec_y
last_advec_percent = np.zeros_like(xn)
where_char = np.zeros_like(xn, dtype=np.int) + npat
contain_particles(xn, yn,
[advec_x, advec_y],
xn_15, yn_15,
where_char, last_advec_percent)
k3x, k3y = rhs_func(xn_15, yn_15)
advec_x = dt * k3x
advec_y = dt * k3y
xn_25 = xn - advec_x
yn_25 = yn - advec_y
last_advec_percent = np.zeros_like(xn)
where_char = np.zeros_like(xn, dtype=np.int) + npat
contain_particles(xn, yn,
[advec_x, advec_y],
xn_25, yn_25,
where_char, last_advec_percent)
k4x, k4y = rhs_func(xn_25, yn_25)
advec_x = 0.5 * dt / 3. * (k1x + 2.0 * k2x + 2.0 * k3x + k4x)
advec_y = 0.5 * dt / 3. * (k1y + 2.0 * k2y + 2.0 * k3y + k4y)
xnp1 = xn - advec_x
ynp1 = yn - advec_y
last_advec_percent = np.zeros_like(xn)
where_char = np.zeros_like(xn, dtype=np.int) + npat
if DEBUG_MODE:
print("............... last call to contain particles .................")
contain_particles(xn, yn,
[advec_x, advec_y],
xnp1, ynp1,
where_char, last_advec_percent)
return xnp1, ynp1, where_char
def contain_particles(eta1_mat, eta2_mat,
advec_coeffs,
eta1_orig, eta2_orig,
where_char, last_advec_percent):
"""
Resets values of characteristics origin such that their
coordinates remain on the patch.
Furthermore it saves the value of the advection done in eta1 and eta2.
RECURSIVE FUNCTION.
    Notes:
        The advection coefficients are only needed to work out how much of
        the advection can still be applied when a characteristic's origin
        leaves the patch; the fraction already applied is accumulated in
        last_advec_percent.
    Args:
        eta1_mat: 1st coordinates on patch (of all points)
        eta2_mat: 2nd coordinates on patch (of all points)
        advec_coeffs: [advec_eta1, advec_eta2] advection to apply in each
            direction
        eta1_orig: [INOUT] 1st coordinate of characteristic's origin
            truncated so that it stays in patch
        eta2_orig: [INOUT] 2nd coordinate of characteristic's origin
            truncated so that it stays in patch
        where_char: [INOUT] index of patch where the origin of the
            characteristic is situated.
        last_advec_percent: [INOUT] fraction of the advection already
            applied for each point (set to 1. once the origin is placed)
    Returns:
"""
# Separating advection:
[advec_coef1, advec_coef2] = advec_coeffs
# Rounding off small values to avoid useless loops :
eta1_orig[np.where(abs(eta1_orig) < epsilon2)] = 0.
eta2_orig[np.where(abs(eta2_orig) < epsilon2)] = 0.
eta1_orig[np.where(abs(1. - eta1_orig) < epsilon2)]=1.
eta2_orig[np.where(abs(1. - eta2_orig) < epsilon2)]=1.
advec_coef1[np.where(abs(advec_coef1) < epsilon2)] = 0.
advec_coef2[np.where(abs(advec_coef2) < epsilon2)] = 0.
#...................................................
if DEBUG_MODE :
print("___________________________________________")
print("----- eta mat")
print(eta1_mat[0, 48])
print(eta2_mat[0, 48])
print("---------- advec")
print(advec_coef1[0, 48])
print(advec_coef2[0, 48])
print("---------- origins")
print(eta1_orig[0, 48])
print(eta2_orig[0, 48])
print("---------- where")
print(where_char[0, 48])
print("---------- last percent")
print(last_advec_percent[0, 48])
print("___________________________________________")
# For particles that stay in the domain
in_indices = np.where((eta1_orig >= 0.) & (eta1_orig <= 1.)
& (eta2_orig >= 0.) & (eta2_orig <= 1.))
# Nothing to for particles that are in domain
last_advec_percent[in_indices] = 1
if (last_advec_percent == 1).all() and DEBUG_MODE :
print("results =========")
print("origins of chars: ")
print(eta1_orig)
print(eta2_orig)
print("in the pats:", where_char)
print("percent :", last_advec_percent)
print("===== leaving 1 ========")
return
# For particles that go out on the x axis, below 0 :
where_out = np.where((eta1_orig < 0.) & (last_advec_percent < 1.))
if np.size(where_out) > 0:
contain_particles_1D(where_char, where_out, last_advec_percent,\
False, True, \
eta1_mat, eta2_mat, \
advec_coef1, advec_coef2, \
eta1_orig, eta2_orig, \
1)
# For particles that go out on the x axis, above 1 :
where_out = np.where((eta1_orig - 1. > 0.) & (last_advec_percent < 1.))
if np.size(where_out) > 0:
contain_particles_1D(where_char, where_out, last_advec_percent,\
True, True, \
eta1_mat, eta2_mat, \
advec_coef1, advec_coef2, \
eta1_orig, eta2_orig, \
3)
# For particles that go out on the y axis, below 0 :
where_out = np.where((eta2_orig < 0.) & (last_advec_percent < 1.))
if np.size(where_out) > 0:
contain_particles_1D(where_char, where_out, last_advec_percent,\
False, False, \
eta2_mat, eta1_mat, \
advec_coef2, advec_coef1, \
eta2_orig, eta1_orig, \
0)
# For particles that go out on the y axis, above 1 :
where_out = np.where((eta2_orig - 1.0 > 0.) & (last_advec_percent < 1.))
if np.size(where_out) > 0:
contain_particles_1D(where_char, where_out, last_advec_percent,\
True, False, \
eta2_mat, eta1_mat, \
advec_coef2, advec_coef1, \
eta2_orig, eta1_orig, \
2)
eta1_orig[np.where(abs(eta1_orig) < epsilon2)] = 0.
eta2_orig[np.where(abs(eta2_orig) < epsilon2)] = 0.
if DEBUG_MODE:
print("results =========")
print("origins of chars: ")
print(eta1_orig)
print(eta2_orig)
print("in the pats:", where_char)
print("percent :", last_advec_percent)
print("________leaving 2__________")
out_indices = np.where((eta1_orig < 0.) | (eta1_orig > 1.)
| (eta2_orig < 0.) | (eta2_orig > 1.))
if (np.size(out_indices) > 0):
print("ERROR in contain_particles(): "\
+"some particles are still outside domain!")
print("indices = ", out_indices)
print(eta1_orig[out_indices])
print(eta2_orig[out_indices])
print("")
import os, sys
sys.exit("STOP")
def contain_particles_1D(where_char, where_out, last_advec_percent,\
is_above1, is_eta1,\
eta_out, eta_in, \
advec_out, advec_in,\
eta_out_orig, eta_in_orig, \
face):
    # Compute the fraction of the advection that could actually be applied before leaving the patch
current_percent = (eta_out[where_out] - 1.*is_above1)/ advec_out[where_out]
# Checking if the particle is not still out of the patch
temp = eta_in[where_out] - current_percent * advec_in[where_out]
where_out2 = where_out[0][np.where((temp >= 0.) & (temp <=1.))[0]]
if np.shape(where_out)[0]!=1:
where_out3 = where_out[1][np.where((temp >= 0.) & (temp <=1.))[0]]
where_out = (where_out2, where_out3)
else:
where_out = (where_out2,)
# updating the percent:
current_percent = (eta_out[where_out] -1.*is_above1) / advec_out[where_out]
# ....
last_advec_percent[where_out] += current_percent
# We compute where the particle stayed
eta_out_orig[where_out] = 1.*is_above1
eta_in_orig[where_out] = eta_in[where_out] \
- current_percent * advec_in[where_out]
# Looking for the neigh. patch and transforming the coord to that patch
[list_pats, list_faces] = conn.connectivity(where_char[where_out], face)
# We treat external external boundary conditions here,
# ie when [npat, face] = [npat', face']
where_not_dirichlet = np.where((where_char[where_out] != list_pats)
& (np.asarray(list_faces) != face))[0]
where_is_dirichlet = np.where((where_char[where_out] == list_pats)
& (np.asarray(list_faces) == face))[0]
if np.shape(where_out)[0]!=1:
where_dirichlet = [where_out[0][where_is_dirichlet],
where_out[1][where_is_dirichlet]]
where_out = [where_out[0][where_not_dirichlet],
where_out[1][where_not_dirichlet]]
else:
where_dirichlet = where_out[0][where_is_dirichlet]
np.reshape(where_dirichlet,(1, np.size(where_dirichlet)))
where_out = where_out[0][where_not_dirichlet]
np.reshape(where_out,(1, np.size(where_out)))
# Treating DIRICHLET BC .........................
# We can not advec any further so we impose the percent to complete state
last_advec_percent[where_dirichlet] = 1.
#................................................
if np.size(where_out) > 0:
where_orig = where_char[where_out]
list_pats2 = [list_pats[val0] for index0, val0 \
in enumerate(where_not_dirichlet)]
where_char[where_out] = list_pats2
[eta1_out_xmin, eta2_out_xmin] = \
conn.transform_patch_coordinates(eta_in_orig[where_out], list_faces)
if (is_eta1) :
[advec1, advec2] = \
conn.transform_advection(\
advec_out[where_out], advec_in[where_out],
where_orig, where_char[where_out],
eta_out_orig[where_out], eta_in_orig[where_out], \
eta1_out_xmin, eta2_out_xmin)
# We get the origin point
eta_out_orig[where_out] = eta1_out_xmin
eta_in_orig[where_out] = eta2_out_xmin
else :
[advec1, advec2] = \
conn.transform_advection(\
advec_in[where_out], advec_out[where_out],
where_orig, where_char[where_out],
eta_in_orig[where_out], eta_out_orig[where_out], \
eta1_out_xmin, eta2_out_xmin)
# We get the origin point
eta_in_orig[where_out] = eta1_out_xmin
eta_out_orig[where_out] = eta2_out_xmin
# Now we advect with the remaining percentage left
eta1_out_xmin += -advec1 * (1. - last_advec_percent[where_out])
eta2_out_xmin += -advec2 * (1. - last_advec_percent[where_out])
# and we contain the particles again
this_percent = last_advec_percent[where_out]
if (this_percent < 0.).any():
import sys
sys.exit("In contain_particles_1D: negative percentage found")
if (is_eta1):
e1o = eta_out_orig[where_out]
e2o = eta_in_orig[where_out]
wc = where_char[where_out]
contain_particles(e1o, e2o, \
[advec1, advec2], \
eta1_out_xmin, eta2_out_xmin, \
wc, this_percent)
# We can now replace with new values:
eta_out_orig[where_out] = eta1_out_xmin
eta_in_orig[where_out] = eta2_out_xmin
where_char[where_out] = wc
last_advec_percent[where_out] = this_percent
else:
e1o = eta_in_orig[where_out]
e2o = eta_out_orig[where_out]
wc = where_char[where_out]
contain_particles(e1o, e2o, \
[advec1, advec2], \
eta1_out_xmin, eta2_out_xmin, \
wc, this_percent)
# We can now replace with new values:
eta_in_orig[where_out] = eta1_out_xmin
eta_out_orig[where_out] = eta2_out_xmin
where_char[where_out] = wc
last_advec_percent[where_out] = this_percent
# else:
# print ""
# print " where out is empty"
# print ""
def derivs_eta(geo, npat, eta1_mat, eta2_mat, advec):
# Getting the jacobian
[d1F1, d2F1, d1F2, d2F2] = jacobian(geo, npat, eta1_mat, eta2_mat)
# Computing the jacobian determinant
sqrt_g = d1F1 * d2F2 - d1F2 * d2F1
wjm = np.where(abs(sqrt_g) < epsilon2)
if (np.size(wjm) > 0):
print("Warning : singular point")
print(np.size(wjm))
sqrt_g[wjm] = epsilon2
# Calculating the value of the second part of the MoC
rhs1 = (advec[0] * d2F2 - advec[1] * d2F1) / sqrt_g
rhs2 = (advec[1] * d1F1 - advec[0] * d1F2) / sqrt_g
return rhs1, rhs2
def jacobian(geo, npat, eta1_mat, eta2_mat):
"""
Computes jacobian in points given.
Args:
        geo: contains info about transformations and patches, given by igakit
        npat: index of the patch on which to evaluate the jacobian
eta1_mat: matrix containing eta1 coordinates
eta2_mat: matrix containing eta2 coordinates
Returns:
[d1F1, d2F1, d1F2, d2F2]: list containing the values
of the jacobian matrix at given points.
"""
u = eta1_mat[:, 0]
v = eta2_mat[0, :]
wum = np.where(u < 0.)
wup = np.where(u > 1.)
if ((np.size(wum) > 0) or (np.size(wup) > 0)):
print("Warning: in jacobian() from charac_feet.py:")
print(" Found a value outside [0,1] in vector u")
u[wum] = 0.
u[wup] = 1.
wvm = np.where(v < 0.)
wvp = np.where(v > 1.)
if ((np.size(wvm) > 0) or (np.size(wvp) > 0)):
print("Warning: in jacobian() from charac_feet.py:")
print(" Found a value outside [0,1] in vector v")
v[wvm] = 0.
v[wvp] = 1.
d1F1 = np.zeros((NPTS1, NPTS2))
d2F1 = np.zeros((NPTS1, NPTS2))
d1F2 = np.zeros((NPTS1, NPTS2))
d2F2 = np.zeros((NPTS1, NPTS2))
# Getting the derivatives
d1F1 = jac[npat,0,:,:]
d2F1 = jac[npat,1,:,:]
d1F2 = jac[npat,2,:,:]
d2F2 = jac[npat,3,:,:]
# Getting rid of close to 0 values
d1F1[np.where(abs(d1F1) <= small_epsilon)] = 0.0
d2F1[np.where(abs(d2F1) <= small_epsilon)] = 0.0
d1F2[np.where(abs(d1F2) <= small_epsilon)] = 0.0
d2F2[np.where(abs(d2F2) <= small_epsilon)] = 0.0
return [d1F1, d2F1, d1F2, d2F2]
def get_phy_char(npatchs, advec, which_advec, tstep, dt):
"""
Computes characteristics' origin in the physical domain.
Args:
npatchs: number of patches
advec: advection coefficients on each coordinate of each patch
which_advec: type of advection (int)
tstep: number of iteration being done in time
dt: step in time
Returns:
xnp1: 1st coordinates of characteristics origin at time tstep
ynp1: 2nd coordinates of characteristics origin at time tstep
"""
xnp1 = np.zeros((npatchs, NPTS1, NPTS2))
ynp1 = np.zeros((npatchs, NPTS1, NPTS2))
for npat in range(npatchs):
if which_advec == 0:
xnp1[npat] = X_mat[npat] - advec[0][npat] * tstep * dt
ynp1[npat] = Y_mat[npat] - advec[1][npat] * tstep * dt
elif which_advec == 1:
r = np.sqrt(X_mat[npat] ** 2 + Y_mat[npat] ** 2)
th = np.arctan2(Y_mat[npat], X_mat[npat])
# TODO : maybe it's better to use directly X_mat and Y_mat
xnp1[npat] = r * np.cos(-2. * np.pi * tstep * dt + th)
ynp1[npat] = r * np.sin(-2. * np.pi * tstep * dt + th)
elif which_advec == 2:
r = np.sqrt(advec[0][npat] ** 2 + advec[1][npat] ** 2)
th = np.arctan2(advec[0][npat], -advec[1][npat])
# TODO : maybe it's better to use directly X_mat and Y_mat
xnp1[npat] = r * np.sin((tstep) * dt + np.pi / 2. - th) + centera
ynp1[npat] = r * np.cos((tstep) * dt + np.pi / 2. - th) + centerb
else:
print("ERROR: in get_phy_char() not known advection.")
import os, sys
sys.exit("STOP")
return xnp1, ynp1
| UTF-8 | Python | false | false | 19,916 | py | 34 | charac_feet.py | 24 | 0.534746 | 0.506327 | 0 | 529 | 36.648393 | 81 |
dzp2095/grabClass | 601,295,462,239 | 20ee9f2edf11bc47510c7429b7206c8b5d1da0c2 | 46c330e8697c308cb6b38d096ba38bc2960127b9 | /urlThread/LoginThread.py | cfbc96c92c006d0f8ba1c769432392a4bd107426 | []
| no_license | https://github.com/dzp2095/grabClass | 5a082259de56a3c61d1377a73aa836e44bd4e8c4 | a67da4c441f2f40571b720f44c520ad650799aec | refs/heads/master | 2021-05-12T17:34:37.195557 | 2018-07-05T03:47:59 | 2018-07-05T03:47:59 | 117,049,233 | 13 | 6 | null | false | 2018-01-12T06:20:49 | 2018-01-11T04:17:42 | 2018-01-12T06:01:10 | 2018-01-12T06:20:49 | 55 | 3 | 1 | 0 | Python | false | null | # -*- coding:utf-8 -*-
import urllib.parse
from pyquery import PyQuery
from PyQt5 import QtCore
# Login worker thread
class LoginThread(QtCore.QThread):
    # When the work finishes, send the user info back to the UI thread
finishSignal = QtCore.pyqtSignal(dict)
def __init__(self,opener,userNumber,password,parent=None):
super(LoginThread,self).__init__(parent)
self.userNumber=userNumber
self.password=password
        # opener passed in from the main thread (carries the cookie handler)
self.opener = opener
self.exitcode = 0
def run(self):
user_data = {
'password': self.password,
"systemId": "",
"type": "xs",
"userName": self.userNumber,
"xmlmsg": ""
}
url = "http://sso.jwc.whut.edu.cn/Certification/login.do"
postData = urllib.parse.urlencode(user_data).encode()
try:
op = self.opener.open(url, postData,timeout=5)
data = op.read()
pqData = PyQuery(data)
            # Parse the user info
name = pqData(".f_01 span:nth-child(1)").text()
ipAddress = pqData(".f_01 span:nth-child(4)").text()
phoneNumber = pqData(".f_01 span:nth-child(3)").text()
loginTime = pqData(".f_01 span:nth-child(5)").text()
url = "http://202.114.90.180/Course/"
op = self.opener.open(url,timeout=8)
            # Work done: send the user info back to the UI thread
self.finishSignal.emit({
"userName": name,
"ipAddress": ipAddress,
"phoneNumber": phoneNumber,
"loginTime": loginTime
})
except Exception as e:
            self.exitcode = 1  # set to 1 if the thread exits abnormally; 0 means a normal exit
self.finishSignal.emit({}) | UTF-8 | Python | false | false | 1,903 | py | 13 | LoginThread.py | 10 | 0.529513 | 0.511748 | 0 | 48 | 35.375 | 66 |
aleross/MITx | 12,713,103,215,041 | 3d3001d70a4da282c4ce04ff8afbc78e4e03f1be | 3bf1a7ef874003a804d8326d4f27aceed5f9c488 | /6.86x/Unit 3/mnist/part2-mnist/nnet_fc.py | c9c4944be586a70ec7c15b3004bdcf6412af12aa | []
| no_license | https://github.com/aleross/MITx | fe34b4a84a6d67f7310e15e6b07658256cfc8665 | e26b53b6652b61aee0bd5ee5453344d0e0e1afe7 | refs/heads/master | 2020-05-22T11:11:55.058001 | 2019-07-30T17:29:53 | 2019-07-30T17:29:53 | 186,317,311 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
import _pickle as cPickle, gzip
import numpy as np
from tqdm import tqdm
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn as nn
import sys
sys.path.append("..")
import utils
from utils import *
from train_utils import batchify_data, run_epoch, train_model
import copy
def main():
# Load the dataset
num_classes = 10
X_train, y_train, X_test, y_test = get_MNIST_data()
# Split into train and dev
dev_split_index = int(9 * len(X_train) / 10)
X_dev = X_train[dev_split_index:]
y_dev = y_train[dev_split_index:]
X_train = X_train[:dev_split_index]
y_train = y_train[:dev_split_index]
permutation = np.array([i for i in range(len(X_train))])
np.random.shuffle(permutation)
X_train = [X_train[i] for i in permutation]
y_train = [y_train[i] for i in permutation]
#validation_scores = [0.932487, 0.944388, 0.937834, 0.907587, 0.936497]
#validation_scores2 = [0.977440, 0.977487, 0.978610, 0.968416, 0.978443]
validation_scores = []
best_validation = {'score': 0, 'param': None}
test_scores = []
baseline = {
'batch_size': 32,
'activation': nn.ReLU(),
'lr': 0.1,
'momentum': 0
}
grid = [(), ('batch_size', 64), ('lr', 0.1), ('momentum', 0.9), ('activation', nn.LeakyReLU())]
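    # Each grid entry overrides exactly one baseline hyperparameter (the empty
    # tuple keeps the baseline unchanged), so every run differs from the
    # baseline in at most one setting.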
for p in grid:
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321) # for reproducibility
print('Testing param:', p)
params = copy.deepcopy(baseline)
if len(p):
params[p[0]] = p[1]
# Split dataset into batches
batch_size = params['batch_size']
train_batches = batchify_data(X_train, y_train, batch_size)
dev_batches = batchify_data(X_dev, y_dev, batch_size)
test_batches = batchify_data(X_test, y_test, batch_size)
#################################
## Model specification
model = nn.Sequential(
nn.Linear(784, 128),
params['activation'],
nn.Linear(128, 10),
)
lr = params['lr']
momentum = params['momentum']
##################################
train_model(train_batches, dev_batches, model, lr=lr, momentum=momentum)
## Evaluate on validation data
loss, accuracy = run_epoch(dev_batches, model.eval(), None)
validation_scores += [accuracy]
if accuracy > best_validation['score'] and len(p):
best_validation['score'] = accuracy
best_validation['param'] = p[0]
## Evaluate the model on test data
loss, accuracy = run_epoch(test_batches, model.eval(), None)
test_scores += [accuracy]
print ("Loss on test set:" + str(loss) + " Accuracy on test set: " + str(accuracy))
print('Best validation:', best_validation)
print('Validation scores:', validation_scores)
print('Test scores:', test_scores)
if __name__ == '__main__':
# Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321) # for reproducibility
main()
| UTF-8 | Python | false | false | 3,307 | py | 26 | nnet_fc.py | 11 | 0.582099 | 0.545207 | 0 | 100 | 32.07 | 111 |
mtoqeerpk/geodat | 14,903,536,554,774 | ea701b632c440e7dd09fb380eb1601c216fd0111 | e7e84591e073a856a05fae8e915151ce587e8126 | /geodat/math.py | c7460198b6d92bf5d40d6baba7e5131782c88b6c | [
"MIT"
]
| permissive | https://github.com/mtoqeerpk/geodat | 50c54255521b2ea651df55658fbf07f7c74ee657 | 5f6923b58249e99ded4caa62bf838f310f590d9a | refs/heads/master | 2020-07-25T15:07:01.237296 | 2016-03-01T17:46:30 | 2016-03-01T17:46:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy
from . import keepdims
def integrate(data, axes=None, iax=None, **kwargs):
''' Integrate data along a selected set of axes
Dimension is maintained using keepdims.sum
data - numpy.ndarray
axes - a list of axes
iax - a list of integers that select which axes are integrated along
'''
if axes is None:
axes = [numpy.arange(idim) for idim in data.shape]
if iax is None:
iax = range(data.ndim)
if type(iax) is not list:
iax = [iax]
inc = numpy.ones(data.shape, dtype=data.dtype)
for ax in iax:
ax_data = numpy.array(axes[ax], dtype=data.dtype)
dx = numpy.gradient(ax_data)[(numpy.newaxis,)*ax + \
(slice(None),) + \
(numpy.newaxis,)*(data.ndim-ax-1)]
inc *= numpy.abs(dx)
# Perform integration
return keepdims.sum(data*inc, axis=iax, **kwargs)
def gradient(f, dx=1., axis=0, mask_boundary=False):
    '''Compute central difference for a particular axis
Input:
f - numpy ndarray
dx - spacing
'''
if f.shape[axis] <= 1:
raise ValueError("Length of axis {} must be >1".format(axis))
result = numpy.ma.zeros(f.shape, dtype=f.dtype)
axis = axis % f.ndim
sl_right = (slice(None),)*axis \
+ (slice(2, None),) + (slice(None),)*(f.ndim-axis-1)
sl_left = (slice(None),)*axis \
+ (slice(0, -2),) + (slice(None),)*(f.ndim-axis-1)
sl_center = (slice(None),)*axis \
+ (slice(1, -1),) + (slice(None),)*(f.ndim-axis-1)
    # Make sure all dimensions of dx have len>1 (scalars are left as-is)
    if not numpy.isscalar(dx):
        dx = dx.squeeze()
# Broadcasting dx
sl_dx_center = (numpy.newaxis,)*axis + (slice(1, -1),) \
+ (numpy.newaxis,)*(f.ndim-axis-1)
if numpy.isscalar(dx):
result[sl_center] = (f[sl_right] - f[sl_left])/2./dx
elif result.ndim == dx.ndim:
result[sl_center] = (f[sl_right] - f[sl_left])/2./dx[sl_center]
else:
result[sl_center] = (f[sl_right] - f[sl_left])/2./dx[sl_dx_center]
# Boundary values
b1 = (slice(None),)*axis \
+ (slice(0, 1),) + (slice(None),)*(f.ndim-axis-1)
b2 = (slice(None),)*axis \
+ (slice(-1, None),) + (slice(None),)*(f.ndim-axis-1)
if mask_boundary:
result[b1] = numpy.ma.masked
result[b2] = numpy.ma.masked
else:
b1_p1 = (slice(None),)*axis \
+ (slice(1, 2),) + (slice(None),)*(f.ndim-axis-1)
b2_m1 = (slice(None),)*axis \
+ (slice(-2, -1),) + (slice(None),)*(f.ndim-axis-1)
sl_dx_0 = (numpy.newaxis,)*axis + (slice(0, 1),) \
+ (numpy.newaxis,)*(f.ndim-axis-1)
sl_dx_end = (numpy.newaxis,)*axis + (slice(-1, None),) \
+ (numpy.newaxis,)*(f.ndim-axis-1)
if numpy.isscalar(dx):
result[b1] = (f[b1_p1] - f[b1])/dx
result[b2] = (f[b2] - f[b2_m1])/dx
elif result.ndim == dx.ndim:
result[b1] = (f[b1_p1] - f[b1])/dx[b1]
result[b2] = (f[b2] - f[b2_m1])/dx[b2]
else:
result[b1] = (f[b1_p1] - f[b1])/dx[sl_dx_0]
result[b2] = (f[b2] - f[b2_m1])/dx[sl_dx_end]
return result
def div(ux, uy, dx, dy, xaxis=-1, yaxis=-2):
''' Compute the divergence of a vector field ux,uy
dx, dy are either 1-d array or a scalar
xaxis - integer indicating the location of x axis (default = -1)
yaxis - integer indicating the location of y axis (default = -2)
'''
result = gradient(ux, dx, axis=xaxis, mask_boundary=True) + \
gradient(uy, dy, axis=yaxis, mask_boundary=True)
return result
def _div(ux, uy, dx, dy):
''' Backup - Compute the divergence of a vector field ux,uy
dx, dy are either 1-d array or a scalar
xaxis - integer indicating the location of x axis (default = -1)
yaxis - integer indicating the location of y axis (default = -2)
'''
ny, nx = ux.shape[-2:]
if numpy.isscalar(dx) or dx.ndim <= 1:
dx = numpy.resize(dx, (nx, ny)).T
if numpy.isscalar(dy) or dy.ndim <= 1:
dy = numpy.resize(dy, (ny, nx))
result = numpy.ma.zeros(ux.shape)
result[..., 1:-1, 1:-1] = \
(ux[..., 1:-1, 2:]-ux[..., 1:-1, 0:-2])/2./dx[1:-1, 1:-1] \
+ (uy[..., 2:, 1:-1]-uy[..., 0:-2, 1:-1])/2./dy[1:-1, 1:-1]
result[..., 0, :] = numpy.ma.masked
result[..., -1, :] = numpy.ma.masked
result[..., :, 0] = numpy.ma.masked
result[..., :, -1] = numpy.ma.masked
return result
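# Illustrative usage sketch for a 2-D field on a regular grid (names assumed):
#   dfdx = gradient(f, dx, axis=-1)     # d f / d x, one-sided at the boundary
#   dfdy = gradient(f, dy, axis=-2)     # d f / d y
#   d = div(ux, uy, dx, dy)             # divergence, masked on the boundary
#   total = integrate(f, axes=[y, x])   # integral over both axes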
| UTF-8 | Python | false | false | 4,536 | py | 47 | math.py | 28 | 0.533069 | 0.507716 | 0 | 118 | 37.440678 | 74 |
LB-Yu/tinyflow | 5,188,320,519,395 | d9ce75801ae7478d117ed39d5df743993d4def5c | 5c4bca1f2678c19bef7c19452511066758b85043 | /tests/mnist_dlsys.py | b758ad75146fd8ce7d8b3fd72cb292a25312cead | [
"MIT"
]
| permissive | https://github.com/LB-Yu/tinyflow | ad3083fc5e2999e1786130d7c6eb440318d105d1 | 772669a983f18d78b48fd9108d0fcbc942487247 | refs/heads/master | 2023-05-25T17:38:21.428289 | 2023-05-03T03:52:04 | 2023-05-03T03:52:04 | 260,410,127 | 55 | 15 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tinyflow import autodiff as ad
from tinyflow import ndarray, gpu_op
import numpy as np
import argparse
import six.moves.cPickle as pickle
import gzip
import os
def load_mnist_data(dataset):
""" Load the dataset
Code adapted from http://deeplearning.net/tutorial/code/logistic_sgd.py
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
"""
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
from six.moves import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, dataset)
print('Loading data...')
# Load the dataset
with gzip.open(dataset, 'rb') as f:
try:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
except:
train_set, valid_set, test_set = pickle.load(f)
# train_set, valid_set, test_set format: tuple(input, target)
# input is a numpy.ndarray of 2 dimensions (a matrix), np.float32
# where each row corresponds to an example. target is a
# numpy.ndarray of 1 dimension (vector), np.int64 that has the same length
# as the number of rows in the input. It should give the target
# to the example with the same index in the input.
return train_set, valid_set, test_set
def convert_to_one_hot(vals):
"""Helper method to convert label array to one-hot array."""
one_hot_vals = np.zeros((vals.size, vals.max()+1))
one_hot_vals[np.arange(vals.size), vals] = 1
return one_hot_vals
def sgd_update_gpu(param, grad_param, learning_rate):
"""Helper GPU SGD update method. Avoids copying NDArray to cpu."""
assert isinstance(param, ndarray.NDArray)
assert isinstance(grad_param, ndarray.NDArray)
gpu_op.matrix_elementwise_multiply_by_const(
grad_param, -learning_rate, grad_param)
gpu_op.matrix_elementwise_add(param, grad_param, param)
def mnist_logreg(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
print("Build logistic regression model...")
W1 = ad.Variable(name="W1")
b1 = ad.Variable(name="b1")
X = ad.Variable(name="X")
y_ = ad.Variable(name="y_")
z1 = ad.matmul_op(X, W1)
y = z1 + ad.broadcastto_op(b1, z1)
loss = ad.softmaxcrossentropy_op(y, y_)
grad_W1, grad_b1 = ad.gradients(loss, [W1, b1])
executor = ad.Executor([loss, grad_W1, grad_b1, y], ctx=executor_ctx)
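    # ad.gradients adds gradient nodes to the computation graph, so a single
    # executor.run() evaluates the loss, both gradients and the predictions
    # for one batch.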
# Read input data
datasets = load_mnist_data("mnist.pkl.gz")
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# Set up minibatch
batch_size = 1000
n_train_batches = train_set_x.shape[0] // batch_size
n_valid_batches = valid_set_x.shape[0] // batch_size
print("Start training loop...")
# Initialize parameters
W1_val = np.zeros((784, 10))
b1_val = np.zeros((10))
X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
valid_X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
valid_y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
if ndarray.is_gpu_ctx(executor_ctx):
W1_val = ndarray.array(W1_val, ctx=executor_ctx)
b1_val = ndarray.array(b1_val, ctx=executor_ctx)
X_val = ndarray.array(X_val, ctx=executor_ctx)
y_val = ndarray.array(y_val, ctx=executor_ctx)
lr = 1e-3
for i in range(num_epochs):
print("epoch %d" % i)
for minibatch_index in range(n_train_batches):
minibatch_start = minibatch_index * batch_size
minibatch_end = (minibatch_index + 1) * batch_size
X_val[:] = train_set_x[minibatch_start:minibatch_end]
y_val[:] = convert_to_one_hot(
train_set_y[minibatch_start:minibatch_end])
loss_val, grad_W1_val, grad_b1_val, _ = executor.run(
feed_dict = {X: X_val, y_: y_val, W1: W1_val, b1: b1_val})
# SGD update
if (executor_ctx is None):
W1_val = W1_val - lr * grad_W1_val
b1_val = b1_val - lr * grad_b1_val
else:
sgd_update_gpu(W1_val, grad_W1_val, lr)
sgd_update_gpu(b1_val, grad_b1_val, lr)
if print_loss_val_each_epoch:
if isinstance(loss_val, ndarray.NDArray):
print(loss_val.asnumpy())
else:
print(loss_val)
correct_predictions = []
for minibatch_index in range(n_valid_batches):
minibatch_start = minibatch_index * batch_size
minibatch_end = (minibatch_index + 1) * batch_size
valid_X_val[:] = valid_set_x[minibatch_start:minibatch_end]
valid_y_val[:] = convert_to_one_hot(
valid_set_y[minibatch_start:minibatch_end])
_, _, _, valid_y_predicted = executor.run(
feed_dict={
X: valid_X_val,
y_: valid_y_val,
W1: W1_val,
b1: b1_val},
convert_to_numpy_ret_vals=True)
correct_prediction = np.equal(
np.argmax(valid_y_val, 1),
np.argmax(valid_y_predicted, 1)).astype(np.float)
correct_predictions.extend(correct_prediction)
accuracy = np.mean(correct_predictions)
# validation set accuracy=0.928200
print("validation set accuracy=%f" % accuracy)
def mnist_mlp(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
print("Build 3-layer MLP model...")
W1 = ad.Variable(name="W1")
W2 = ad.Variable(name="W2")
W3 = ad.Variable(name="W3")
b1 = ad.Variable(name="b1")
b2 = ad.Variable(name="b2")
b3 = ad.Variable(name="b3")
X = ad.Variable(name="X")
y_ = ad.Variable(name="y_")
# relu(X W1+b1)
z1 = ad.matmul_op(X, W1)
z2 = z1 + ad.broadcastto_op(b1, z1)
z3 = ad.relu_op(z2)
# relu(z3 W2+b2)
z4 = ad.matmul_op(z3, W2)
z5 = z4 + ad.broadcastto_op(b2, z4)
z6 = ad.relu_op(z5)
# softmax(z5 W2+b2)
z7 = ad.matmul_op(z6, W3)
y = z7 + ad.broadcastto_op(b3, z7)
loss = ad.softmaxcrossentropy_op(y, y_)
grad_W1, grad_W2, grad_W3, grad_b1, grad_b2, grad_b3 = ad.gradients(
loss, [W1, W2, W3, b1, b2, b3])
executor = ad.Executor(
[loss, grad_W1, grad_W2, grad_W3, grad_b1, grad_b2, grad_b3, y],
ctx=executor_ctx)
# Read input data
datasets = load_mnist_data("mnist.pkl.gz")
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# Set up minibatch
batch_size = 1000
n_train_batches = train_set_x.shape[0] // batch_size
n_valid_batches = valid_set_x.shape[0] // batch_size
print("Start training loop...")
# Initialize parameters
rand = np.random.RandomState(seed=123)
W1_val = rand.normal(scale=0.1, size=(784, 256))
W2_val = rand.normal(scale=0.1, size=(256, 100))
W3_val = rand.normal(scale=0.1, size=(100, 10))
b1_val = rand.normal(scale=0.1, size=(256))
b2_val = rand.normal(scale=0.1, size=(100))
b3_val = rand.normal(scale=0.1, size=(10))
X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
valid_X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
valid_y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
if ndarray.is_gpu_ctx(executor_ctx):
W1_val = ndarray.array(W1_val, ctx=executor_ctx)
W2_val = ndarray.array(W2_val, ctx=executor_ctx)
W3_val = ndarray.array(W3_val, ctx=executor_ctx)
b1_val = ndarray.array(b1_val, ctx=executor_ctx)
b2_val = ndarray.array(b2_val, ctx=executor_ctx)
b3_val = ndarray.array(b3_val, ctx=executor_ctx)
X_val = ndarray.array(X_val, ctx=executor_ctx)
y_val = ndarray.array(y_val, ctx=executor_ctx)
lr = 1.0e-3
for i in range(num_epochs):
print("epoch %d" % i)
for minibatch_index in range(n_train_batches):
minibatch_start = minibatch_index * batch_size
minibatch_end = (minibatch_index + 1) * batch_size
X_val[:] = train_set_x[minibatch_start:minibatch_end]
y_val[:] = convert_to_one_hot(
train_set_y[minibatch_start:minibatch_end])
loss_val, grad_W1_val, grad_W2_val, grad_W3_val, \
grad_b1_val, grad_b2_val, grad_b3_val, _ = executor.run(
feed_dict={
X: X_val,
y_: y_val,
W1: W1_val,
W2: W2_val,
W3: W3_val,
b1: b1_val,
b2: b2_val,
b3: b3_val})
# SGD update
if (executor_ctx is None):
W1_val = W1_val - lr * grad_W1_val
W2_val = W2_val - lr * grad_W2_val
W3_val = W3_val - lr * grad_W3_val
b1_val = b1_val - lr * grad_b1_val
b2_val = b2_val - lr * grad_b2_val
b3_val = b3_val - lr * grad_b3_val
else:
sgd_update_gpu(W1_val, grad_W1_val, lr)
sgd_update_gpu(W2_val, grad_W2_val, lr)
sgd_update_gpu(W3_val, grad_W3_val, lr)
sgd_update_gpu(b1_val, grad_b1_val, lr)
sgd_update_gpu(b2_val, grad_b2_val, lr)
sgd_update_gpu(b3_val, grad_b3_val, lr)
if print_loss_val_each_epoch:
if isinstance(loss_val, ndarray.NDArray):
print(loss_val.asnumpy())
else:
print(loss_val)
correct_predictions = []
for minibatch_index in range(n_valid_batches):
minibatch_start = minibatch_index * batch_size
minibatch_end = (minibatch_index + 1) * batch_size
valid_X_val[:] = valid_set_x[minibatch_start:minibatch_end]
valid_y_val[:] = convert_to_one_hot(
valid_set_y[minibatch_start:minibatch_end])
_, _, _, _, _, _, _, valid_y_predicted = executor.run(
feed_dict={
X: valid_X_val,
y_: valid_y_val,
W1: W1_val,
W2: W2_val,
W3: W3_val,
b1: b1_val,
b2: b2_val,
b3: b3_val},
convert_to_numpy_ret_vals=True)
correct_prediction = np.equal(
np.argmax(valid_y_val, 1),
np.argmax(valid_y_predicted, 1)).astype(np.float)
correct_predictions.extend(correct_prediction)
accuracy = np.mean(correct_predictions)
# validation set accuracy=0.970800
print("validation set accuracy=%f" % accuracy)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--model",
help="Choose model: all, logreg, mlp", default="all")
parser.add_argument(
"-c", "--executor_context",
help="Choose executor context: numpy, gpu", default="numpy")
parser.add_argument(
"-e", "--num_epoch",
help="Provide number of epochs to train.", type=int, default=20)
parser.add_argument(
"-l", "--print_loss_val_each_epoch",
help="Print loss value at the end of each epoch", action="store_true")
args = parser.parse_args()
models = []
executor_ctx = None
print_loss_val_each_epoch = False
if args.model == "logreg":
models = [mnist_logreg]
elif args.model == "mlp":
models = [mnist_mlp]
elif args.model == "all":
models = [mnist_logreg, mnist_mlp]
if args.executor_context == "numpy":
executor_ctx = None
elif args.executor_context == "gpu":
# Assume only use gpu 0.
executor_ctx = ndarray.gpu(0)
if args.print_loss_val_each_epoch:
print_loss_val_each_epoch = True
num_epochs = args.num_epoch
for m in models:
import time
tic = time.time()
m(executor_ctx, num_epochs, print_loss_val_each_epoch)
toc = time.time()
print("mode use time: " + str(toc - tic))
| UTF-8 | Python | false | false | 12,709 | py | 9 | mnist_dlsys.py | 5 | 0.574317 | 0.548588 | 0 | 336 | 36.824405 | 84 |
k-harada/AtCoder | 13,185,549,639,394 | 44482682b28d6945d16a049a5187a3a4c602a552 | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /AGC/AGC043/A.py | 8c85f28c12c358f60c60b337e675202157b4ef00 | []
| no_license | https://github.com/k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | false | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | 2022-11-05T03:53:20 | 2023-05-22T16:29:17 | 548 | 9 | 0 | 1 | Python | false | false | from collections import deque
def solve(h, w, s_list):
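    # cost[i][j] = minimum number of colour changes along a right/down path
    # from (0, 0) to (i, j); from the number of changes on the best path to
    # the goal, (changes + 1) // 2 gives the number of black runs, i.e. the
    # number of flips needed.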
# cost_map
cost = [[1000] * w for _ in range(h)]
cost[0][0] = 0
    # find the minimum number of colour changes along any monotone path
queue = deque([(0, 0)])
while len(queue) > 0:
i, j = queue.popleft()
if i < h - 1:
if s_list[i][j] != s_list[i + 1][j]:
new_cost = cost[i][j] + 1
else:
new_cost = cost[i][j]
if new_cost < cost[i + 1][j]:
cost[i + 1][j] = new_cost
queue.append((i + 1, j))
if j < w - 1:
if s_list[i][j] != s_list[i][j + 1]:
new_cost = cost[i][j] + 1
else:
new_cost = cost[i][j]
if new_cost < cost[i][j + 1]:
cost[i][j + 1] = new_cost
queue.append((i, j + 1))
res = (cost[h - 1][w - 1] + 1) // 2
# print(cost)
    # (changes + 1) // 2 undercounts when both the start and goal cells are black, so add one more operation
if s_list[0][0] == "#" and s_list[h - 1][w - 1] == "#":
res += 1
return res
def main():
h, w = map(int, input().split())
s_list = [list(input()) for _ in range(h)]
res = solve(h, w, s_list)
print(res)
def test():
assert solve(3, 3, [[".", "#", "#"], [".", "#", "."], ["#", "#", "."]]) == 1
assert solve(2, 2, [["#", "."], [".", "#"]]) == 2
if __name__ == "__main__":
test()
main()
| UTF-8 | Python | false | false | 1,356 | py | 1,154 | A.py | 1,152 | 0.388643 | 0.361357 | 0 | 51 | 25.588235 | 80 |
RazvanKokovics/Planes | 4,561,255,289,033 | 0ce24a47a8c22988b91a3b95f9f670155556ac1a | 4d4c45a2d7788ef1fa32f79f690ddc3302bd5887 | /domain/plane.py | 6350c454461e2e2b153f144778a2a16229d17e11 | []
| no_license | https://github.com/RazvanKokovics/Planes | 326c38c46b408028f19c33a68854dd5ed5dd295b | dbfe16fad6d50ed656bc093117b43f9ff5437e11 | refs/heads/master | 2022-03-30T07:49:16.913896 | 2020-01-09T17:02:54 | 2020-01-09T17:02:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from imports.gameHandlers import GameHandlers
class Plane():
#the plane class which represents a plane object
def __init__(self, cabinPosition, orientation):
#cabinPosition = str, a combination of a letter (A -> H) and a digit (1-> 8)
#orientation = str, it could be one of the following ('up', 'down', 'left', 'right')
self._pos = cabinPosition
self._orientation = orientation
def getPos(self):
#returns the cabinPosition of a plane
return self._pos
def getOrientation(self):
#returns the orientation of a plane
return self._orientation
def getPlaneCells(self):
#returns a list with all the cells of a plane
directions = GameHandlers.getDirections(self._orientation)
sign = GameHandlers.getSign(self._orientation)
cells = []
position = GameHandlers.stringToCoordinates(self._pos)
for d in directions:
#row
r = position[0] + sign * d[0]
#column
c = position[1] + sign * d[1]
cells.append((r, c))
return cells | UTF-8 | Python | false | false | 1,167 | py | 12 | plane.py | 11 | 0.582691 | 0.577549 | 0 | 32 | 34.53125 | 92 |
lgarrison/kvsstcp | 7,430,293,429,309 | 3cb2f7f8e5819308985d5745c91a4d592a49d0a9 | 266ae62162796227fcb97b7e4fb789de53df37e0 | /kvsstcp.py | 72cdcb7f9c3b6eb9745ca6c956852659b700e548 | [
"Apache-2.0"
]
| permissive | https://github.com/lgarrison/kvsstcp | 55ab87e5f885e5709f3068dc833a0f6a697bf482 | a329d76efcd24028acfea4b60e08d0bbbbb85c17 | refs/heads/master | 2021-05-20T03:47:26.929675 | 2020-09-08T18:44:50 | 2020-09-08T18:44:50 | 252,172,058 | 0 | 0 | Apache-2.0 | true | 2020-04-01T12:42:51 | 2020-04-01T12:42:50 | 2019-11-15T10:03:14 | 2018-07-13T17:49:45 | 116 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
from collections import defaultdict as DD
try:
from cPickle import dumps as PDS
except ImportError:
from pickle import dumps as PDS
from functools import partial
import errno
import gc
import logging
import os
import resource
import select
import socket
import sys
import threading
try:
from .kvscommon import *
except:
from kvscommon import *
logger = logging.getLogger('kvs')
# There are some cyclic references in asyncio, handlers, waiters, etc., so I'm re-enabling this:
#gc.disable()
_DISCONNECTED = frozenset((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN, errno.ECONNABORTED, errno.EPIPE, errno.EBADF))
_BUFSIZ = 8192
# Concepts:
#
# Every connection is represented by a dispatcher.
#
# Every dispatcher is registered with a handler, which in effect runs
# the KVS server loop.
#
# The handler runs an infinite loop that mostly sits on a poll of some
# sort waiting for one or more events associated with registered
# connections (identified by their file descriptor).
#
# When an event occurs the dispatcher associated with the connection
# is used to process the event.
#
# The listening socket is treated just like any other connection and
# has its own dispatcher. An "event" on this connection triggers an
# accept that leads to the creation of a new dispatcher
# (KVSRequestDispatcher) to handle exchanges with the client.
#
# This approach has the very important benefit that it is single threaded.
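# A hedged illustration (added commentary, not part of the original source): the
# lifecycle described above, written out as a short sketch.  It only references
# KVSServer, which is defined later in this module, and is wrapped in a function
# so nothing runs at import time.
def _architecture_sketch():
    server = KVSServer()   # the listening socket gets its own dispatcher
    # The handler's run() loop is already polling in the KVSServerThread; each
    # accepted client becomes a KVSRequestDispatcher registered with the same
    # handler, so all connection I/O is multiplexed in that single thread.
    server.shutdown()      # shuts down the listening socket and stops the loop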
class Handler(object):
'''Based on asyncore, but with a simpler, stricter per-thread interface that allows better performance.'''
def __init__(self):
self.disps = dict()
self.current = None
self.running = True
def register(self, disp):
self.disps[disp.fd] = disp
def unregister(self, disp):
del self.disps[disp.fd]
def run(self):
while self.running:
try:
self.poll()
except IOError as e:
if e.errno == errno.EINTR:
continue
raise
for d in list(self.disps.values()):
try:
d.close()
except Exception as e:
logger.info('%r reported %r on close in handler.', d, e)
self.close()
def writable(self, disp):
"Equivalent to setting mask | OUT, but safe to be called from other (non-current) handlers."
if disp.mask & self.OUT: return
disp.mask |= self.OUT
# write can be called from other threads
if self.current is not disp:
self.modify(disp)
def close(self):
self.running = False
class PollHandler(Handler):
def __init__(self):
self.IN, self.OUT, self.EOF = select.POLLIN, select.POLLOUT, select.POLLHUP
self.poller = select.poll()
Handler.__init__(self)
def register(self, disp):
Handler.register(self, disp)
self.poller.register(disp.fd, disp.mask)
def unregister(self, disp):
self.poller.unregister(disp.fd)
Handler.unregister(self, disp)
def modify(self, disp):
self.poller.modify(disp.fd, disp.mask)
def poll(self):
ev = self.poller.poll()
for (f, e) in ev:
d = self.current = self.disps[f]
oldm = d.mask
if e & self.EOF:
d.handle_close()
continue
if e & self.IN:
d.handle_read()
if d.mask & self.OUT:
d.handle_write()
self.current = None
if d.mask != oldm and not (d.mask & self.EOF):
self.modify(d)
def stop(self, disp):
Handler.close(self)
class EPollHandler(PollHandler):
def __init__(self):
self.IN, self.OUT, self.EOF = select.EPOLLIN, select.EPOLLOUT, select.EPOLLHUP
self.poller = select.epoll()
Handler.__init__(self)
def close(self):
self.poller.close()
Handler.close(self)
class KQueueHandler(Handler):
def __init__(self):
self.IN, self.OUT, self.EOF = 1, 2, 4
self.kqueue = select.kqueue()
Handler.__init__(self)
def register(self, disp):
Handler.register(self, disp)
disp.curmask = 0
self.modify(disp)
def unregister(self, disp):
disp.mask = 0
self.modify(disp)
Handler.unregister(self, disp)
def modify(self, disp):
c = []
if disp.mask & self.IN:
if not (disp.curmask & self.IN):
c.append(select.kevent(disp.fd, select.KQ_FILTER_READ, select.KQ_EV_ADD))
elif disp.curmask & self.IN:
c.append(select.kevent(disp.fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE))
if disp.mask & self.OUT:
if not (disp.curmask & self.OUT):
c.append(select.kevent(disp.fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD))
elif disp.curmask & self.OUT:
c.append(select.kevent(disp.fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE))
if c: self.kqueue.control(c, 0)
disp.curmask = disp.mask
def poll(self):
try:
ev = self.kqueue.control(None, 1024)
except OSError as e:
if e.errno == errno.EBADF:
self.running = False
return
raise
for e in ev:
d = self.current = self.disps[e.ident]
if e.filter == select.KQ_FILTER_READ:
d.handle_read()
elif e.filter == select.KQ_FILTER_WRITE:
d.handle_write()
self.current = None
if self.running: self.modify(d)
def close(self):
self.kqueue.close()
Handler.close(self)
def stop(self, disp):
self.close()
class Dispatcher(object):
def __init__(self, sock, handler, mask=0):
self.sock = sock
self.fd = sock.fileno()
self.mask = mask
sock.setblocking(0)
self.handler = handler
def open(self):
self.handler.register(self)
def close(self):
self.mask = self.handler.EOF
self.handler.unregister(self)
try:
self.sock.close()
except socket.error:
pass
def accept(self):
try:
return self.sock.accept()
except socket.error as e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
return
if e.errno in _DISCONNECTED or e.errno == errno.EINVAL:
self.handle_close()
return
raise
def send(self, data):
try:
return self.sock.send(data)
except socket.error as e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
return 0
if e.errno in _DISCONNECTED:
self.handle_close()
return 0
raise
def recv(self, siz):
try:
data = self.sock.recv(siz)
if not data:
self.handle_close()
return data
except socket.error as e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
return b''
if e.errno in _DISCONNECTED:
self.handle_close()
return b''
raise
def recv_into(self, buf):
try:
n = self.sock.recv_into(buf)
if n == 0:
self.handle_close()
return n
except socket.error as e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                return 0   # recv_into must return a byte count, not bytes
            if e.errno in _DISCONNECTED:
                self.handle_close()
                return 0
raise
def shutdown(self):
try:
self.mask |= self.handler.IN
self.sock.shutdown(socket.SHUT_RDWR)
except socket.error as e:
if e.errno not in _DISCONNECTED: raise
def handle_close(self):
self.close()
class StreamDispatcher(Dispatcher):
'''Based on asyncore.dispatcher_with_send, works with EventHandler.
Also allows input of known-size blocks.'''
def __init__(self, sock, handler):
super(StreamDispatcher, self).__init__(sock, handler)
self.out_buf = []
self.in_buf = memoryview(bytearray(_BUFSIZ))
self.in_off = 0
self.read_size = 0
self.read_handler = None
def write(self, *data):
for d in data:
self.out_buf.append(memoryview(d))
self.handler.writable(self)
def handle_write(self):
while self.out_buf:
buf = self.out_buf[0]
r = self.send(buf[:1048576])
if r < len(buf):
if r: self.out_buf[0] = buf[r:]
return
self.out_buf.pop(0)
self.mask &= ~self.handler.OUT
def next_read(self, size, f):
self.read_size = size
if size > len(self.in_buf):
buf = memoryview(bytearray(max(size, _BUFSIZ)))
buf[:self.in_off] = self.in_buf[:self.in_off]
self.in_buf = buf
self.read_handler = f
self.mask |= self.handler.IN
def handle_read(self):
if self.in_off < len(self.in_buf):
self.in_off += self.recv_into(self.in_buf[self.in_off:])
while True:
handler = self.read_handler
z = self.read_size
if not handler or self.in_off < z:
return
i = self.in_buf[:z]
self.in_buf = self.in_buf[z:]
self.in_off -= z
self.read_handler = None
self.mask &= ~self.handler.IN
handler(i)
class KVSRequestDispatcher(StreamDispatcher):
def __init__(self, pair, server, handler):
sock, self.addr = pair
self.server = server
# Keep track of any currently waiting get:
self.waiter = None
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
super(KVSRequestDispatcher, self).__init__(sock, handler)
logger.info('Accepted connect from %r', self.addr)
self.next_op()
self.open()
def handle_close(self):
self.cancel_waiter()
logger.info('Closing connection from %r', self.addr)
self.close()
def error(self, msg):
logger.error('Error from %r: %s' % (self.addr, msg))
self.close()
def cancel_waiter(self):
if self.waiter:
self.server.kvs.cancel_wait(self.waiter)
self.waiter = None
def next_op(self):
self.next_read(4, self.handle_op)
def next_lendata(self, handler):
# wait for variable-length data prefixed by AsciiLenFormat
def handle_len(l):
l = l.tobytes()
try:
n = int(l)
except ValueError:
n = -1
if n < 0:
self.error("invalid data len: '%s'" % l)
return
self.next_read(n, handler)
self.next_read(AsciiLenChars, handle_len)
def handle_op(self, op):
op = op.tobytes()
if b'clos' == op:
self.shutdown()
elif b'down' == op:
logger.info('Calling server shutdown')
self.server.shutdown()
elif b'dump' == op:
d = self.server.kvs.dump()
self.write(AsciiLenFormat(len(d)), d)
self.next_op()
elif op in [b'get_', b'mkey', b'put_', b'view']:
self.next_lendata(partial(self.handle_opkey, op))
else:
self.error("Unknown op: '%r'" % op)
def handle_opkey(self, op, key):
key = key.tobytes()
#DEBUGOFF logger.debug('(%s) %s key "%s"', whoAmI, reqtxt, key)
if b'mkey' == op:
self.next_lendata(partial(self.handle_mkey, key))
elif b'put_' == op:
self.next_read(4, lambda encoding:
self.next_lendata(partial(self.handle_put, key, encoding)))
else: # 'get_' or 'view'
# Cancel waiting for any previous get/view operation (since client wouldn't be able to distinguish the async response)
self.cancel_waiter()
self.waiter = KVSWaiter(op, key, self.handle_got)
self.server.kvs.wait(self.waiter)
# But keep listening for another op (like 'clos') to cancel this one
self.next_op()
def handle_mkey(self, key, val):
#DEBUGOFF logger.debug('(%s) val: %s', whoAmI, repr(val))
self.server.kvs.monkey(key, val)
self.next_op()
def handle_put(self, key, encoding, val):
# TODO: bytearray val?
#DEBUGOFF logger.debug('(%s) val: %s', whoAmI, repr(val))
self.server.kvs.put(key, (encoding, val))
self.next_op()
def handle_got(self, encval):
(encoding, val) = encval
self.write(encoding, AsciiLenFormat(len(val)), val)
self.waiter = None
class KVSWaiter:
def __init__(self, op, key, handler):
if op == b'get_': op = b'get'
self.op = op
self.delete = op == b'get'
self.key = key
self.handler = handler
class KVS(object):
'''Get/Put/View implements a client-server key value store. If no
value is associated with a given key, clients will block on get or
view until a value is available. Multiple values may be associated
with any given key.
This is, by design, a very simple, lightweight service that only
depends on standard Python modules.
'''
def __init__(self, getIndex=0, viewIndex=-1):
self.getIndex, self.viewIndex = getIndex, viewIndex #TODO: Add sanity checks?
self.key2mon = DD(lambda:DD(set)) # Maps a normal key to keys that monitor it.
self.monkeys = set() # List of monitor keys.
# store and waiters are mutually exclusive, and could be kept in the same place
self.store = DD(list)
self.waiters = DD(list)
self.opCounts = {b'get': 0, b'put': 0, b'view': 0, b'wait': 0}
self.ac, self.rc = 0, 0
def _doMonkeys(self, op, k):
# Don't monitor operations on monitor keys.
if k in self.monkeys: return
#DEBUGOFF logger.debug('doMonkeys: %s %s %s', op, k, repr(self.key2mon[True][op] | self.key2mon[k][op]))
for p in (True, k):
for mk in self.key2mon[p][op]:
self.put(mk, (b'ASTR', repr((op, k))))
def dump(self):
'''Utility function that returns a snapshot of the KV store.'''
def vrep(v):
t = v[0].tobytes()
            # Omit or truncate some values, in which case add the original length as a third value
            if t == b'JSON' or t == b'HTML': return (t, v[1].tobytes())
            if t != b'ASTR': return (t, None, len(v[1]))
            if v[1][:6].tobytes().lower() == b'<html>': return (t, v[1].tobytes()) # for backwards compatibility only
            if len(v[1]) > 50: return (t, v[1][:24].tobytes() + b'...' + v[1][-23:].tobytes(), len(v[1]))
return (t, v[1].tobytes())
return PDS(([self.opCounts[b'get'], self.opCounts[b'put'], self.opCounts[b'view'], self.opCounts[b'wait'], self.ac, self.rc], [(k, len(v)) for k, v in self.waiters.items() if v], [(k, len(vv), vrep(vv[-1])) for k, vv in self.store.items() if vv]))
def wait(self, waiter):
'''Atomically (remove and) return a value associated with key k. If
none, block.'''
#DEBUGOFF logger.debug('wait: %s, %s', repr(waiter.key), repr(waiter.op))
self._doMonkeys(waiter.op, waiter.key)
vv = self.store.get(waiter.key)
if vv:
if waiter.delete:
v = vv.pop(self.getIndex)
if not vv: self.store.pop(waiter.key)
else:
v = vv[self.viewIndex]
self.opCounts[waiter.op] += 1
#DEBUGOFF logger.debug('_gv (%s): %s => %s (%d)', waiter.op, waiter.key, repr(v[0]), len(v[1]))
waiter.handler(v)
else:
self.waiters[waiter.key].append(waiter)
self.opCounts[b'wait'] += 1
self._doMonkeys(b'wait', waiter.key)
#DEBUGOFF logger.debug('(%s) %s acquiring', repr(waiter), repr(s))
self.ac += 1
def cancel_wait(self, waiter):
ww = self.waiters.get(waiter.key)
if ww:
try:
ww.remove(waiter)
except ValueError:
pass
if not ww: self.waiters.pop(waiter.key)
def monkey(self, mkey, v):
'''Make Mkey a monitor key. Value encodes what events to monitor and
for which key:
Key:Events
Whenever a listed event occurs for "Key", a put will be done
to "Mkey" with the value "<event> <key>". If 'Key' is empty,
the events listed will be monitored for all keys. 'Events' is
some subset of 'g', 'p', 'v' and 'w' (get, put, view and
wait). Monitoring of any event *not* listed is turned off for
the specified key.
'''
#DEBUGOFF logger.debug('monkey: %s %s', mkey, v)
if b':' not in v: return #TODO: Add some sort of error handling?
self.monkeys.add(mkey)
k, events = v.rsplit(b':', 1)
if not k: k = True
for e, op in [(b'g', b'get'), (b'p', b'put'), (b'v', b'view'), (b'w', b'wait')]:
if e in events:
self.key2mon[k][op].add(mkey)
else:
try: self.key2mon[k][op].remove(mkey)
except KeyError: pass
#DEBUGOFF logger.debug('monkey: %s', repr(self.key2mon))
def put(self, k, v):
'''Add value v to those associated with the key k.'''
#DEBUGOFF logger.debug('put: %s, %s', repr(k), repr(v))
self.opCounts[b'put'] += 1
ww = self.waiters.get(k) # No waiters is probably most common, so optimize for
# that. ww will be None if no waiters have been
# registered for key k.
consumed = False
if ww:
while ww:
waiter = ww.pop(0)
#DEBUGOFF logger.debug('%s releasing', repr(waiter))
self.rc += 1
self.opCounts[waiter.op] += 1
waiter.handler(v)
if waiter.delete:
consumed = True
break
if not ww: self.waiters.pop(k)
if not consumed: self.store[k].append(v)
self._doMonkeys(b'put', k)
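# Hedged usage sketch (added for illustration; not part of the original source).
# It exercises the in-process API defined above: put() stores an (encoding, value)
# pair, wait() takes a KVSWaiter whose handler receives the value, and monkey()
# registers a monitor key.  The key names and the list-append handler are
# assumptions made only for this example; the function is never called.
def _kvs_usage_sketch():
    kvs = KVS()
    got = []
    kvs.monkey(b'events', b'answer:pg')                    # watch puts/gets on b'answer'
    kvs.put(b'answer', (b'ASTR', b'42'))
    kvs.wait(KVSWaiter(b'get_', b'answer', got.append))    # value present: handler fires now
    kvs.wait(KVSWaiter(b'view', b'pending', got.append))   # no value yet: waiter is parked
    kvs.put(b'pending', (b'ASTR', b'later'))               # releases the parked waiter
    return got   # [(b'ASTR', b'42'), (b'ASTR', b'later')]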
class KVSServer(threading.Thread, Dispatcher):
def __init__(self, host=None, port=0):
if not host: host = socket.gethostname()
self.kvs = KVS()
snof, hnof = resource.getrlimit(resource.RLIMIT_NOFILE)
hnof = min(hnof, 1000000) # don't need unreasonably many
if snof < hnof:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hnof, hnof))
logger.info('Raised max open files from %d to %d', snof, hnof)
except:
logger.info('Failed to raise max open files from %d to %d; continuing anyway', snof, hnof)
pass
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
logger.info('Setting queue size to 4000')
self.sock.listen(4000)
self.cinfo = self.sock.getsockname()
if hasattr(select, 'epoll'):
self.handler = EPollHandler()
elif hasattr(select, 'kqueue'):
self.handler = KQueueHandler()
else:
self.handler = PollHandler()
Dispatcher.__init__(self, self.sock, self.handler, self.handler.IN)
self.open()
threading.Thread.__init__(self, name='KVSServerThread', target=self.handler.run)
self.start()
def handle_read(self):
pair = self.accept()
if pair:
KVSRequestDispatcher(pair, self, self.handler)
def handle_close(self):
logger.info('Server shutting down')
self.close()
self.handler.close()
def shutdown(self):
if self.handler.running:
super(KVSServer, self).shutdown()
self.handler.stop(self)
def env(self, env = os.environ.copy()):
'''Add the KVSSTCP environment variables to the given environment.'''
env['KVSSTCP_HOST'] = self.cinfo[0]
env['KVSSTCP_PORT'] = str(self.cinfo[1])
return env
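# Hedged illustration (added commentary, not part of the original source): a child
# process launched via --execcmd can recover the server address from the two
# variables published by env() above; the helper name is an assumption.
def _server_addr_from_env():
    # KVSSTCP_HOST / KVSSTCP_PORT are set by KVSServer.env().
    return os.environ['KVSSTCP_HOST'], int(os.environ['KVSSTCP_PORT'])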
if '__main__' == __name__:
import argparse
argp = argparse.ArgumentParser(description='Start key-value storage server.')
argp.add_argument('-H', '--host', default='', help='Host interface (default is hostname).')
argp.add_argument('-p', '--port', type=int, default=0, help='Port (default is 0 --- let the OS choose).')
argp.add_argument('-a', '--addrfile', default=None, metavar='AddressFile', type=argparse.FileType('w'), help='Write address to this file.')
argp.add_argument('-e', '--execcmd', default=None, metavar='COMMAND SEQUENCE', help='Execute command with augmented environment.')
argp.add_argument('-l', '--logfile', default=None, metavar='KVSSLogfile', type=argparse.FileType('w'), help='Log file for key-value storage server.')
args = argp.parse_args()
# TODO: figure out where this should really go.
lconf = {'format': '%(asctime)s %(levelname)-8s %(name)-15s: %(message)s', 'level': logging.DEBUG}
if args.logfile:
args.logfile.close()
lconf['filename'] = args.logfile.name
logging.basicConfig(**lconf)
t = KVSServer(args.host, args.port)
addr = '%s:%d'%t.cinfo
logger.info('Server running at %s.', addr)
if args.addrfile:
args.addrfile.write(addr)
args.addrfile.close()
try:
if args.execcmd:
import subprocess
logger.info('Launching: %r, env %r', args.execcmd, t.env())
subprocess.check_call(args.execcmd, shell=True, env=t.env())
else:
            while t.is_alive():
t.join(60)
finally:
t.shutdown()
t.join()
| UTF-8 | Python | false | false | 22,260 | py | 30 | kvsstcp.py | 9 | 0.558086 | 0.553324 | 0 | 634 | 34.11041 | 255 |
caodg/docklet | 7,481,833,071,893 | c063b21ae00c2a6251074e933033b474da28e954 | c0153c0c2a171b3cb4dff2a01e033f031ce0bce5 | /src/tools.py | b3c62d2369a75f3b174f81a89e05c10d3f9ce284 | [
"BSD-2-Clause"
]
| permissive | https://github.com/caodg/docklet | a58d3ca45d54812b5a952f0ed1fdff3c0754e719 | 84e511acef9d7deff75370ede189a48ab1e9c4dd | refs/heads/master | 2021-01-13T04:59:02.122962 | 2017-06-06T12:39:45 | 2017-06-06T12:39:45 | 55,135,843 | 3 | 0 | null | true | 2016-03-31T09:00:59 | 2016-03-31T09:00:59 | 2016-03-31T09:00:49 | 2016-03-31T08:15:54 | 987 | 0 | 0 | 0 | null | null | null | #!/usr/bin/python3
import os, random
#from log import logger
def loadenv(configpath):
configfile = open(configpath)
#logger.info ("load environment from %s" % configpath)
for line in configfile:
line = line.strip()
if line == '':
continue
keyvalue = line.split("=")
if len(keyvalue) < 2:
continue
key = keyvalue[0].strip()
value = keyvalue[1].strip()
#logger.info ("load env and put env %s:%s" % (key, value))
os.environ[key] = value
def gen_token():
return str(random.randint(10000, 99999))+"-"+str(random.randint(10000, 99999))
| UTF-8 | Python | false | false | 646 | py | 115 | tools.py | 68 | 0.577399 | 0.540248 | 0 | 23 | 27.086957 | 82 |
munenelewis/somo | 2,731,599,219,377 | 31e9a353664595c85abda845d34793c3313185fe | 547cda72a9a7bde7cff3ef208261ceb1c13645ca | /somoApp/models.py | ea162fcf19488a322bf6c0eeb14838507400d22b | []
| no_license | https://github.com/munenelewis/somo | 6e61de99ed3012b553be5197236a598110c69af2 | 2b26c3135e8ccae3845caf2e000e167c1ecd085a | refs/heads/master | 2020-03-08T17:40:49.005471 | 2018-04-05T23:21:08 | 2018-04-05T23:21:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class Classes(models.Model):
Classes = models.IntegerField()
block = models.CharField(max_length=255)
class Comments(models.Model):
body = models.CharField(max_length=255)
expectations = models.CharField(max_length=255)
class fees(models.Model):
bal = models.IntegerField()
amountpaid = models.IntegerField()
class Results(models.Model):
kiswahili = models.IntegerField()
math = models.IntegerField()
social_cre = models.IntegerField()
    english = models.IntegerField()
science = models.IntegerField()
total = models.IntegerField()
class Students(models.Model):
name =models.CharField(max_length=255)
adm =models.IntegerField(unique=True)
form =models.CharField(max_length=255)
photo =models.ImageField()
email = models.EmailField()
classes = models.ForeignKey(Classes)
comments =models.ForeignKey(Comments)
fee = models.ForeignKey(fees)
marks = models.ForeignKey(Results)
class Parents(models.Model):
name = models.CharField(max_length=200)
adm = models.ForeignKey(Students)
number = models.IntegerField()
class Teachers(models.Model):
name = models.CharField(max_length = 200)
email = models.EmailField(max_length=200)
parent = models.ForeignKey(Parents)
| UTF-8 | Python | false | false | 1,503 | py | 9 | models.py | 8 | 0.634065 | 0.618097 | 0 | 68 | 18.955882 | 52 |
k550495/SoChange | 15,272,903,739,022 | ab2e215a714f59e40ebbc24726cc11ffe278ab55 | a62db41456ce40947deb373b4e97d25ec98d356b | /sochange/expensify/urls.py | 60e2d1400492937ab3f8d70f322da17331e38364 | []
| no_license | https://github.com/k550495/SoChange | 86fa746a9ef69f40c85035af7d7e72d052fc8201 | 852d7545442f0b93fba17f22ac1e3f2bc89735eb | refs/heads/master | 2021-01-13T03:17:35.789014 | 2010-05-12T05:02:21 | 2010-05-12T05:02:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
urlpatterns = patterns('expensify.views',
)
| UTF-8 | Python | false | false | 145 | py | 53 | urls.py | 26 | 0.793103 | 0.793103 | 0 | 6 | 23.166667 | 58 |
waittim/draw_yolo_box | 8,564,164,797,203 | 8e3e99cc00cc62bbf0335d95aaec97895a2a45cf | ce43ea964d9af55a8ecb4b12faa5a9789d2dcd07 | /get_origin_image.py | 8b895e4b9e7d44bfec213a1ea74e00f2f002497d | [
"MIT"
]
| permissive | https://github.com/waittim/draw_yolo_box | 017b0c23666a213ec99be481bce5f23c6c91fbb5 | 015556c219ffb795f02dfd8547ec94ee8baa8652 | refs/heads/main | 2023-03-13T16:00:21.625607 | 2022-11-26T04:55:47 | 2022-11-26T04:55:47 | 321,011,431 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf-8
import cv2
import os
import piexif
# Global variables for path configuration
raw_images_folder = './raw_images/'    # path of the raw_images folder holding the images to check
save_images_folder = './save_image/'   # path of the save_image folder where copies are written
name_list_path = './name_list.txt'     # path of the txt file listing the image names to process
wrong_folder = './wrong/'              # path of the wrong folder holding the misclassified images
# Function: copy the original image matching each file in the wrong folder into save_image
def get_image(image_name,raw_images_folder,save_images_folder ):
print(image_name)
if image_name == '.DS_Store':
return 0
    image_path = os.path.join( raw_images_folder,'%s.jpg'%(image_name))      # path of this original jpg
    save_file_path = os.path.join(save_images_folder,'%s.jpg'%(image_name))  # path where this jpg copy is saved
image = cv2.imread(image_path)
cv2.imwrite(save_file_path,image)
piexif.remove(save_file_path)
# Function: from the folder of misclassified images, generate a txt listing every image name (without extension)
def make_name_list(wrong_folder, name_list_path):
    image_file_list = os.listdir(wrong_folder)           # all file names (with extensions) under this path
    text_image_name_list_file=open(name_list_path,'w')   # open the txt in write mode so it is refreshed each run (do not append)
    for image_file_name in image_file_list:              # iterate and write each name
        image_name,file_extend = os.path.splitext(image_file_name)  # strip the extension
        text_image_name_list_file.write(image_name+'\n')             # write the name
text_image_name_list_file.close()
if __name__ == '__main__':   # the code below runs only when this file is executed directly as a script
    make_name_list(wrong_folder, name_list_path)                 # write the name-list txt
    image_names = open(name_list_path).read().strip().split()   # image names without extensions
image_total = 0
    for image_name in image_names:   # iterate over the image names
box_num = get_image(image_name,raw_images_folder,save_images_folder)
image_total += 1
print('Image number:',image_total)
| UTF-8 | Python | false | false | 2,055 | py | 14 | get_origin_image.py | 2 | 0.679951 | 0.675659 | 0 | 53 | 29.773585 | 88 |
Gracker/TTDeDroid | 13,357,348,320,869 | dee7a1729b7373f84b982f8809bab73f6f0867fd | 23b1a3c48098c3e9974aef874adae47c63a0f7c3 | /libs/enjarify/enjarify/jvm/genmathops.py | e76f68fb25ce31b9f0b7861228072ffce9273050 | [
"Apache-2.0"
]
| permissive | https://github.com/Gracker/TTDeDroid | dcee2404226673ccfdebc12074b75bd609afc93b | 913876bebdba727bb9bd203983e281b3e036ca3b | refs/heads/master | 2023-04-08T11:01:06.799698 | 2023-03-19T13:06:08 | 2023-03-19T13:06:08 | 271,542,788 | 1 | 1 | Apache-2.0 | true | 2020-06-11T12:39:39 | 2020-06-11T12:39:39 | 2020-06-08T11:56:16 | 2020-05-16T07:08:40 | 121,801 | 0 | 0 | 0 | null | false | false | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate mathops.py, the lookup tables giving information about dalvik math operations by opcode
if __name__ == "__main__":
unary = 'ineg inot lneg lnot fneg dneg i2l i2f i2d l2i l2f l2d f2i f2l f2d d2i d2l d2f i2b i2c i2s'
binary = 'iadd isub imul idiv irem iand ior ixor ishl ishr iushr ladd lsub lmul ldiv lrem land lor lxor lshl lshr lushr fadd fsub fmul fdiv frem dadd dsub dmul ddiv drem'
binary = binary + ' ' + binary
binlit = 'iadd isub imul idiv irem iand ior ixor '
binlit = binlit + binlit + 'ishl ishr iushr'
stypes = dict(zip('ifldbcs', 'INT FLOAT LONG DOUBLE INT INT INT'.split()))
print('''
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by genmathops.py - do not edit''')
print('from . import jvmops')
print('from . import scalartypes as scalars')
print('UNARY = {')
for i, code in enumerate(unary.split()):
code = code.replace('not','xor')
if '2' in code:
srct = stypes[code[0]]
destt = stypes[code[2]]
else:
srct = destt = stypes[code[0]]
print(' 0x{:02X}: (jvmops.{}, scalars.{}, scalars.{}),'.format(i + 0x7b, code.upper(), srct, destt))
print('}')
print('BINARY = {')
for i, code in enumerate(binary.split()):
st = stypes[code[0]]
# shift instructions have second arg an int even when operating on longs
st2 = 'INT' if 'sh' in code else st
print(' 0x{:02X}: (jvmops.{}, scalars.{}, scalars.{}),'.format(i + 0x90, code.upper(), st, st2))
print('}')
print('BINARY_LIT = {')
for i, code in enumerate(binlit.split()):
print(' 0x{:02X}: jvmops.{},'.format(i + 0xd0, code.upper()))
print('}')
| UTF-8 | Python | false | false | 2,867 | py | 35 | genmathops.py | 31 | 0.667248 | 0.648413 | 0 | 65 | 43.107692 | 174 |
ukaserge/ocr | 4,535,485,506,087 | 74998461273980ea28d88d1d1efbd52b82e22237 | 2e48ae5dac46c9fd063bcd9312ac618f3e4ba466 | /Machine learnig OCR/svm.py | 659b3ec217025bcdb88cb798075040baf532840e | []
| no_license | https://github.com/ukaserge/ocr | 39ca88eb8130f2c77936394216f2edbb774144d9 | 0e80d0a09580e99c418f0073f23bba27eeb82f06 | refs/heads/master | 2023-03-16T18:19:41.743069 | 2020-03-24T23:07:54 | 2020-03-24T23:07:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.model_selection import cross_val_score
# documentation link: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# load the data
data = pd.read_csv('caract.csv')
df_x = data.iloc[:, 0:19]
df_y = data.iloc[:, -1]
# split the data into training and test sets
# the test data represents 20% of all the data in the caract.csv file
for i in range(10, 20):
print("\n====== %d ========\n" % i)
x_train, x_test, y_train, y_test = train_test_split(
df_x, df_y, test_size=0.20, random_state=i)
    # create the SVM
clf = svm.SVC(kernel='linear', C=20).fit(x_train, y_train)
# print(y_test.values)
print(clf.score(x_test, y_test))
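# Illustrative addition (not in the original script): cross_val_score is imported
# above but never used; this is one way it could evaluate the same linear SVM with
# 5-fold cross-validation over the whole dataset.
scores = cross_val_score(svm.SVC(kernel='linear', C=20), df_x, df_y, cv=5)
print("5-fold cross-validation mean accuracy: %.3f" % scores.mean())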
| UTF-8 | Python | false | false | 840 | py | 11 | svm.py | 6 | 0.682635 | 0.664671 | 0 | 26 | 31.115385 | 93 |
somethingx02/AutoHasher | 8,478,265,446,870 | dcc67555c97adb350f87f35535a46c95b2c93bec | 3979029b46dd20b19a8bf2ef4ed4125abf95a630 | /FeatureHashingSenti/src/models/biLSTMAttention.py | 413b3faa8243b1ab1eca266f6b2fa2f5aee756a1 | []
| no_license | https://github.com/somethingx02/AutoHasher | 95ba62f04ec4924ef6fb6aff56ae985090bc2d48 | 1fd48cb2bf8929a90828b23cecfb4901f84b06a6 | refs/heads/master | 2020-03-21T21:06:07.306584 | 2018-07-01T19:44:09 | 2018-07-01T19:44:09 | 139,046,111 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf8 -*-
import torch
import torch.nn #torch.nn and torch two different module from torch
import torch.nn.functional
import torch.autograd
from allennlp.modules.elmo import Elmo
from models.attention import Attention
# when debugging, switch to the absolute import below: from attention import Attention
#from attention import Attention #Debugging
import os
class BiLSTMAttention(torch.nn.Module):
'''
You have to inherent nn.Module to define your own model
two virtual functions, loda and save must be instantialized
'''
def __init__(self,
param_document_seq_len,# 300 in our model
param_character_embedding_len, #it depends on the setting
param_bilstm_hidden_size, # the size of tweet level rnn, since it's biLSTM should be divided twice
param_attention_size, # attention size should be a smoothed representation of character-emb
param_class_count,# class of user labels, in fact it is the class of level 1 labels
param_options_file, # elmo file for options
param_weight_file): # elmo file for weight
super(BiLSTMAttention,self).__init__()
self.modelname='BiLSTMAttention' #same with the class name
self.document_seq_len = param_document_seq_len
self.character_embedding_len = param_character_embedding_len
self.bilstm_hidden_size = param_bilstm_hidden_size
self.attention_size = param_attention_size
self.class_count = param_class_count
self.elmo_layer = Elmo( param_options_file, param_weight_file, num_output_representations = 1, dropout = 0 )
self.elmo_hiddensize = 1024 # this is fixed, after elmo_layer, the CharEmbLen should be transferred to ElmoHiddensize
self.bilstm_document_layer_count = 2 # 2 BiLSTM layers
self.bilstm_document = torch.nn.LSTM( self.elmo_hiddensize, self.bilstm_hidden_size, self.bilstm_document_layer_count, dropout = 0.0, bidirectional = True ) #, default batch_first = False, the batch_size = second
self.attention_over_seq = Attention( self.attention_size, self.bilstm_hidden_size * 2 ) # to handle biLSTM output
self.linear = torch.nn.Linear( self.elmo_hiddensize , self.class_count )
def load(self , path):
'''
cpu => cpu or
gpu => gpu
'''
self.load_state_dict( torch.load(path) )
def save(self, path):
save_result = torch.save( self.state_dict() , path )
return save_result
def load_cpu_from_gputrained(self, path):
self.load_state_dict( torch.load(path, map_location = 'cpu') )
def forward( self , param_input ):
'''
from input to output
'''
( batch_size , doc_seq_len , char_emb_len ) = param_input.size()
assert self.document_seq_len == doc_seq_len
assert self.character_embedding_len == char_emb_len
list_elmo_rep = self.elmo_layer( param_input )['elmo_representations']
var_elmo_rep = list_elmo_rep[0]
# since num_output_representations = 1, so len(list_elmo_rep) = 1,
# if num_output_representations == 2, then will produce 2 same elmo_representations of [batch_size, seq_len, wordembedding_len]
##----------an alternative
#list_elmo_rep = self.elmo_layer( param_input )['elmo_representations']
#var_elmo_rep = torch.cat( list_elmo_rep, dim = 0 ) # concatenate seq of tensors
##e.g.: [(8,23,50),(8,23,50)] -> (16,23,50), so here: [(8,23,50)] -> (8,23,50)
##----------an alternative
#print( var_elmo_rep.size() )
var_elmo_rep = var_elmo_rep.permute( 1, 0, 2 ) # not batch_first
var_bilstm_document_output, (var_bilstm_document_output_h, var_bilstm_document_output_c) = self.bilstm_document( var_elmo_rep )
var_bilstm_document_output = var_bilstm_document_output.permute( 1, 0, 2 ) # batch_first again
# #output is (batch , seq , hiddesize * 2 ) # it's concatenated automatically
# #var_bilstm_document_output = torch.cat( ( var_twitter_embedded , var_only_topic_embedding ) ,dim = 3 )
# #now is batch * seq * hiddesize * 2
# batch_size , seq , hiddesize * 2
#print( var_bilstm_document_output.size() )
var_attentioned_output = self.attention_over_seq( var_bilstm_document_output )
# var_attentioned_output is batch , hiddensize * 2
var_attentioned_output = self.linear( var_attentioned_output )
#print( var_attentioned_output.size() )
return var_attentioned_output
def forward_obtainTrainedElmoRep(self , param_input):
'''
compute the ElmoRep after training
'''
( batch_size , doc_seq_len , char_emb_len ) = param_input.size()
assert self.document_seq_len == doc_seq_len
assert self.character_embedding_len == char_emb_len
dict_elmo = self.elmo_layer( param_input )
return dict_elmo
if __name__ == '__main__':
# m = torch.randn(4,5,6)
    # print(m)
# m_var = torch.autograd.Variable( m )
# #ids = torch.Tensor([1,1,0,0]).long() #autograd = false by acquiescence
# #var2 = m.gather(1, ids.view(-1,1))
# ids = torch.LongTensor( [ 2 , 4 ] )
# ids_var = torch.autograd.Variable( ids )
#they have the same function
# var2 = m.index_select( 2 , ids )
# print( var2 )
# var3 = torch.index_select( m , 2 , ids )
    # print( var3 )
# var2_var = m_var.index_select( 2 , ids_var )
# print(var2_var)
# #model=model.cpu() #load the model to the CPU, 2 different ways to load the model
# #model=model.cuda() #load the model to the GPU
# var_test_expand = torch.autograd.Variable( torch.Tensor( [ [1 ,2 ,3 ,4 , 5, 6] , [7,8,9,10,11,12] , [ 13,14,15,16,17,18 ] ] ) )
# var_test_expanded = var_test_expand.expand( 2, 3, 6 ) # expand the (3 , 6) into higher dimensions
# print(var_test_expanded)
# var_test_mult = torch.autograd.Variable( torch.Tensor( [ [ 1 ] , [ 2 ] ] ) )
# var_test_fac = torch.autograd.Variable( torch.Tensor( [ 2 ] ) )
# var_test_mult = var_test_mult.mul( var_test_fac )
# print( var_test_mult )
# var_test_mult = var_test_mult * 2 + 10
# print( var_test_mult )
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2" #cuda device id
var_input = torch.autograd.Variable( torch.ones( [16, 1219, 50], dtype = torch.long ) ) #torch.LongTensor( 128, 23, 50 ) )
var_input = var_input * 261
att_model_test = BiLSTMAttention(
param_document_seq_len = 1219,# 300 in our model
param_character_embedding_len = 50, #it depends on the setting
param_bilstm_hidden_size = 1024 // 2, # the size of tweet level rnn, since it's biLSTM should be divided twice
param_attention_size = (1024 // 2 * 2) // 1024 * 1024 + (1024 // 2 * 2) % 1024, # attention size should be a smoothed representation of character-emb
param_class_count = 5,
param_options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json",
param_weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5")
#res=att_model_test( var_input )
att_model_test = att_model_test.cuda()
var_input = var_input.cuda()
att_model_test( var_input ) | UTF-8 | Python | false | false | 7,886 | py | 13 | biLSTMAttention.py | 5 | 0.611971 | 0.582171 | 0 | 170 | 45.394118 | 224 |
xcsliu/pycharm_obj | 16,896,401,384,412 | 7e8bd429f707ab98445d59d3edbc20a787ccc721 | 2930d5ae25d81cff5d503f66de451b68b0891a90 | /poi/source/baidu_poi_source.py | c955a25aed6980746becd34a8a48fb2515e5640f | []
| no_license | https://github.com/xcsliu/pycharm_obj | 77957b8e4ef03af3e8fcd56b877c389751c4ab20 | f6da3bf20155d2f7152af98f5ab4e5ab02e858c1 | refs/heads/master | 2021-01-01T06:15:57.948509 | 2017-09-13T17:56:17 | 2017-09-13T17:56:17 | 97,396,933 | 1 | 0 | null | false | 2017-07-30T11:27:19 | 2017-07-16T16:45:39 | 2017-07-16T16:45:39 | 2017-07-30T11:27:19 | 19 | 0 | 0 | 0 | null | null | null | # total poi
def get_poi_with_lat_lng_radius(lat, lng, radius=1):
pass
# each kind
def get_traffic_station_with_lat_lng_radius():
pass
def get_restaurant_with_lat_lng_radius():
pass
def get_education_source_with_lat_lng_radius():
pass
def get_hospital_with_lat_lng_radius():
pass
def get_park_with_lat_lng_radius():
pass
# street view and maps
def get_street_view_with_lat_lng(lat, lng):
pass
def get_base_maps_with_lat_lng(lat, lng):
pass
| UTF-8 | Python | false | false | 479 | py | 38 | baidu_poi_source.py | 37 | 0.680585 | 0.678497 | 0 | 26 | 17.384615 | 52 |
Vivekagent47/HackerRank | 730,144,445,966 | 11a0a0e32b07f3c7b3c00a8fc76f74a9293eaf63 | 9ae51674c1a89799ef74c2f0067b258c33e45954 | /Python/33.py | 1603c31698fe9c62853efd58cc3fd272a776a200 | []
| no_license | https://github.com/Vivekagent47/HackerRank | f26de95e8eb8d5d3cc2627071f1c7031b097cf8a | 6c66dcb82b65bf60177ad1b1ef1910582db6a29b | refs/heads/master | 2023-07-10T12:31:32.284603 | 2021-08-23T15:37:03 | 2021-08-23T15:37:03 | 258,414,783 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import defaultdict
d, n = defaultdict(list), list(map(int, input().split()))
for i in range(n[0]):
d[input()].append(i + 1)
for i in range(n[1]):
print(' '.join(map(str, d[input()])) or -1)
# Takes very long time to execute
# n, m = map(int, input().split())
# listA = list()
# listB = list()
# for i in range(0,n):
# listA.append(input())
# for i in range(0,m):
# listB.append(input())
# for i in range(m):
# for j in range(n):
# if listB[i] == listA[j] and listB[i] in listA:
# print(j+1,end=" ")
# elif listB[i] not in listA:
# print("-1")
# print()
| UTF-8 | Python | false | false | 644 | py | 83 | 33.py | 72 | 0.540373 | 0.52795 | 0 | 30 | 20.433333 | 57 |
Bennyelg/magic_reader | 3,659,312,178,600 | 0f1b23d3ce0745614ead8fa9d18d35d259bb250d | bdb2781b5c150c32e96696af535aa56b9861eb0d | /magic_reader/readers/CsvReader.py | 6912199cba71a506a556c99180b6b2504820fa1c | []
| no_license | https://github.com/Bennyelg/magic_reader | ac65d49601668314979eac54d41df619a6240399 | cd2687c2e263d244c2b8570cf5b2aee37075756f | refs/heads/master | 2017-11-12T15:49:19.009072 | 2017-03-24T17:40:45 | 2017-03-24T17:40:45 | 84,361,034 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import parse_source_path, s3_read
import unicodecsv as csv
import cStringIO
class CsvReader(object):
def __init__(self, source_path, real_path=None):
self.source_path = source_path
self.return_as_dict = False
self.delimiter = None
self.quoting = None
self.escape_char = None
self.real_path = real_path
args = parse_source_path(self.real_path) if self.real_path else parse_source_path(self.source_path)
if "return_as" in args: # pragma: no cover
self.return_as_dict = False if not args.get("return_as") else True
if "quoting" in args: # pragma: no cover
self.quoting = args.get("quoting")
if "escape_char" in args: # pragma: no cover
self.escape_char = args.get("escape_char")
if "delimiter" in args: # pragma: no cover
self.delimiter = args.get("delimiter")
self.path = args.get("path")
self.chunk_size = None if "chunk_size" not in args else args["chunk_size"]
def read(self):
elements = ""
if "s3" in self.path: # pragma: no cover
for element in s3_read(path=self.path, chunk_size=self.chunk_size):
elements += element
if self.return_as_dict: # pragma: no cover
fieldnames = next(cStringIO.StringIO(elements)).split(self.delimiter if self.delimiter else ",")
data = csv.DictReader(cStringIO.StringIO(elements), fieldnames=fieldnames,
quoting=csv.QUOTE_ALL if not self.quoting else self.quoting,
escapechar=None if not self.escape_char else self.escape_char)
for row in data:
yield row
else: # pragma: no cover
data = csv.reader(cStringIO.StringIO(elements), delimiter=self.delimiter if self.delimiter else ",",
quoting=csv.QUOTE_ALL, escapechar=None if not self.escape_char else self.escape_char)
for row in data:
yield row
else:
try: # pragma: no cover
reader = open(self.path, "rb")
except Exception as err: # pragma: no cover
raise err
fieldnames = next(reader).split(self.delimiter if self.delimiter else ",")
if self.return_as_dict:
data = csv.DictReader(reader, fieldnames=fieldnames,
quoting=csv.QUOTE_ALL if not self.quoting else self.quoting,
escapechar=None if not self.escape_char else self.escape_char)
for row in data:
yield row
else: # pragma: no cover
data = csv.reader(reader)
for row in data:
yield row
reader.close()
| UTF-8 | Python | false | false | 2,912 | py | 18 | CsvReader.py | 11 | 0.553228 | 0.552198 | 0 | 62 | 45.935484 | 119 |
jjojohjohn/CodingInterviews | 15,333,033,271,011 | 3fbc6053f97ecf7f33c6052da8eb0cb152a2a84d | 739e6e6dc0c7442dbbff796c7a900486d13d0e62 | /ch2/2.3.py | 0c06f4d847e2e98e19f5d2e651027f426f7b7576 | []
| no_license | https://github.com/jjojohjohn/CodingInterviews | 059bc054a22be75be584958b85123d458931af3a | 840172a92e491b63e05fdc4bafa17d25db218e38 | refs/heads/master | 2021-05-02T06:23:57.983928 | 2014-11-12T00:40:56 | 2014-11-12T00:40:56 | 12,779,575 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from linkedList import Node
def removeMiddleNode(middle):
if middle == None:
return 'Cannot remove a "None" node'
    if middle.nextNode == None:
        return 'Next middle node does not exist'
middle.data = middle.nextNode.data
if middle.nextNode.nextNode != None:
middle.nextNode = middle.nextNode.nextNode
else:
middle.nextNode = None
test = Node(3)
test.addToTail(4)
test.addToTail(5)
test.addToTail(5)
test.addToTail(3)
test.addToTail(6)
| UTF-8 | Python | false | false | 483 | py | 45 | 2.3.py | 45 | 0.68323 | 0.670807 | 0 | 19 | 24.421053 | 50 |
estraviz/codewars | 10,788,957,849,119 | e45d3652b6b68c4c3b92de25dce5f8bbc33621fa | f30b91db647dca1f77fffa4b7e26b6c6a68abbc6 | /7_kyu/Odder Than the Rest/test_odd_one.py | 30cb82b8aa744815db79d04d25f7fe5c0bb6b9ea | []
| no_license | https://github.com/estraviz/codewars | 73caf95519eaac6f34962b8ade543bf4417df5b7 | 5f8685e883cb78381c528a0988f2b5cad6c129c2 | refs/heads/master | 2023-05-13T07:57:43.165290 | 2023-05-08T21:50:39 | 2023-05-08T21:50:39 | 159,744,593 | 10 | 55 | null | null | null | null | null | null | null | null | null | null | null | null | null | from odd_one import odd_one
def test_odd_one():
assert odd_one([2, 4, 6, 7, 10]) == 3
assert odd_one([2, 16, 98, 10, 13, 78]) == 4
assert odd_one([4, -8, 98, -12, -7, 90, 100]) == 4
assert odd_one([2, 4, 6, 8]) == -1
| UTF-8 | Python | false | false | 235 | py | 2,206 | test_odd_one.py | 1,670 | 0.510638 | 0.353191 | 0 | 8 | 28.375 | 54 |
lucashsouza/Desafios-Python | 1,821,066,177,685 | bbe2a07b1051a34938d449885b5a369980440440 | 9c7581c3b862174878a5e71609f94b3e5a2de5c9 | /CursoEmVideo/Aula07/ex015.py | 24f5b356e198b163628e652f2805aae725d8ba8c | [
"MIT"
]
| permissive | https://github.com/lucashsouza/Desafios-Python | 6d9fdc3500e0d01ce9a75201fc4fe88469928170 | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | refs/heads/master | 2020-06-21T16:49:32.884025 | 2019-07-23T01:23:07 | 2019-07-23T01:23:07 | 143,765,113 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print('Aluguel de Carro')
dias = int (input('Quantos dias de dias: '))
km = float(input('Km rodados: '))
total = (dias * 60) + (km * 0.15)
print ('Valor total a ser pago: R${:.2f}'.format(total))
| UTF-8 | Python | false | false | 201 | py | 106 | ex015.py | 105 | 0.606965 | 0.577114 | 0 | 5 | 38.2 | 56 |
aohanyao/Advanced | 18,537,078,859,203 | cfcb828911ad135c2d7fb09d701afb84541e0a0c | 04bd6fbe71412a1456ee091323718cd100103499 | /python/Sample1/backup/com/jjc/practice/rent/db_query.py | e389e2cd7d1b68c3753716e94bfb37438f3ac97f | []
| no_license | https://github.com/aohanyao/Advanced | 085c091ac379875a12dfec884c6f7ba14a810cdb | 849830103300bc35d79eeae5b29cce965f331a0d | refs/heads/master | 2022-10-10T09:51:27.308522 | 2019-01-15T08:27:25 | 2019-01-15T08:27:25 | 64,308,611 | 4 | 10 | null | false | 2022-10-07T13:07:23 | 2016-07-27T12:59:48 | 2019-01-15T08:27:35 | 2019-01-15T08:27:33 | 90,852 | 4 | 6 | 1 | Python | false | false | #!/usr/bin/python3
import pymysql
# Open the database connection
db = pymysql.connect("localhost", "root", "root", "python_58_rent")
# Use the cursor() method to create a cursor object
cursor = db.cursor()
# Build the SQL statement
sql = '''SELECT * FROM rent_58_list'''
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
name = row[1]
print(name)
except Exception as e:
print("发生错误")
# 关闭数据库连接
db.close()
| UTF-8 | Python | false | false | 466 | py | 164 | db_query.py | 101 | 0.639303 | 0.624378 | 0 | 19 | 20.157895 | 67 |
jfm44/TestsPython | 14,663,018,374,802 | 63be872f5aeb22bd594d7c9a91514930f8455244 | 0fe9e46d5b3a171e61900f7847fa120960c758ae | /Exercices/exo17_dataclass1.py | 4842fe8f9ec17e1164910701da7da5e914aba35e | []
| no_license | https://github.com/jfm44/TestsPython | 5743604f1027e6a311b8e01fde267ff767a8b2cc | 292295d621de60f04f43a2b22cbeab5d4b2e2551 | refs/heads/master | 2020-05-25T20:24:57.142818 | 2019-05-24T15:09:33 | 2019-05-24T15:09:33 | 187,976,391 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dataclasses import dataclass
class Inv:
n: str="a"
px: float=1.0
nb: int=1
def cost(self) -> float:
return self.px * self.nb
# the __init__ method is generated implicitly by @dataclass
@dataclass
class Inventaire:
nom: str="a"
prix_unitaire: float=1.5
nbr: int = 2
def cout(self) -> float:
return self.prix_unitaire * self.nbr
if __name__ == "__main__":
a = Inv()
print("attributs de a(Inv) : ",a.__dict__)
print("a.cost() : ",a.cost())
print("")
b = Inventaire()
print("attributs de b(Invenaire) : ",b.__dict__)
print("b.cout() : ",b.cout())
| UTF-8 | Python | false | false | 613 | py | 51 | exo17_dataclass1.py | 50 | 0.546493 | 0.536705 | 0 | 32 | 18.15625 | 52 |
lightning-sprinkle/lightning-sprinkle-user-service | 6,451,040,881,156 | 5e3471d6343a13408d3b7b8661616fd3561cc380 | d0110f6f0fc5620b43cef3fb0fd25d74b00e2527 | /app/cert.py | 270e24e5997f2a84eb330424c979fc74032298e2 | [
"MIT"
]
| permissive | https://github.com/lightning-sprinkle/lightning-sprinkle-user-service | 7902132def35b94a1ae8cdda4a040fa09d2ed364 | c5f44d17da2a9894982e203aa1fbcc6f74753db2 | refs/heads/master | 2022-10-10T00:41:14.625619 | 2020-03-10T02:45:55 | 2020-03-10T02:45:55 | 243,968,261 | 0 | 0 | MIT | false | 2022-09-23T22:36:17 | 2020-02-29T12:51:40 | 2020-03-10T02:45:57 | 2022-09-23T22:36:17 | 128 | 0 | 0 | 3 | Python | false | false | import ssl
from cryptography import x509
from cryptography.hazmat.backends import default_backend
def isOrganization(hostname):
"""
Function looks up the SSL certificate for the domain, and checks if
it is an OV or EV certificate by reading the following CertificatePolicies
2.23.140.1.2.2: Organization Validation
2.23.140.1.1: Extended Validation
"""
# Create a real connection in order to support SNI (server name indication)
conn = ssl.create_connection((hostname, 443))
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn, server_hostname=hostname)
cert_pem = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
cert = x509.load_pem_x509_certificate(cert_pem.encode(), default_backend())
# Find the certificate type
for policy in cert.extensions.get_extension_for_class(x509.CertificatePolicies).value:
oid = policy.policy_identifier.dotted_string
if oid == '2.23.140.1.2.2' or oid == '2.23.140.1.1':
return True
return False
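# Hedged usage sketch (added for illustration; not part of the original module):
# the hostname below is only an example value.
if __name__ == '__main__':
  print(isOrganization('www.example.com'))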
| UTF-8 | Python | false | false | 1,006 | py | 12 | cert.py | 8 | 0.740557 | 0.689861 | 0 | 26 | 37.692308 | 88 |
washyn/face_recognition_ms_cf | 8,787,503,095,149 | 918f39e5fc08c0106a31cd74cf6b1b7fa46b90ef | 651bf745b71c7da30b46c8e766a09adf7ad9d2c2 | /models.py | 222a7206c481c6806d0ed95ffd139625c6e8046d | []
| no_license | https://github.com/washyn/face_recognition_ms_cf | c18065f22893a6a32d507df220425ff5e46859d9 | baee35ab303122f9ea162869b51feb6692c764ac | refs/heads/main | 2023-02-27T05:33:03.114505 | 2021-01-30T22:42:43 | 2021-01-30T22:42:43 | 314,428,345 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from global_variables import *
from peewee import *
import datetime
import uuid
db = SqliteDatabase(sqliteDbFileName)
class BaseModel(Model):
class Meta:
database = db
# class User(BaseModel):
# username = CharField(unique=True)
# class Tweet(BaseModel):
# user = ForeignKeyField(User, backref='tweets')
# message = TextField()
# created_date = DateTimeField(default=datetime.datetime.now)
# is_published = BooleanField(default=True)
class Student(BaseModel):
fullName = CharField(unique=True)
code = CharField(unique=True)
personGuid = CharField(unique=False)
folderGuid = CharField(unique=False)
personId = CharField(unique=False)
# personGuid = UUIDField()
# faceIdentifier = CharField(unique=False)
def __str__(self):
return f"Full name:{self.fullName}\nCode:{self.code}\nPerson guid:{self.personGuid}\nFolder guid:{self.folderGuid}" | UTF-8 | Python | false | false | 922 | py | 32 | models.py | 25 | 0.701735 | 0.701735 | 0 | 38 | 23.289474 | 123 |