| column | dtype | values (min – max; ⌀ = may be null) |
|---|---|---|
| repo_name | string | lengths 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | string | lengths 40 – 40 |
| directory_id | string | lengths 40 – 40 |
| path | string | lengths 5 – 151 |
| content_id | string | lengths 40 – 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 – 130 |
| snapshot_id | string | lengths 40 – 40 |
| revision_id | string | lengths 40 – 40 |
| branch_name | string | lengths 4 – 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k – 687M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 – 10.2M ⌀ |
| gha_stargazers_count | int32 | 0 – 178k ⌀ |
| gha_forks_count | int32 | 0 – 88.9k ⌀ |
| gha_open_issues_count | int32 | 0 – 2.72k ⌀ |
| gha_language | string | lengths 1 – 16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 – 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 – 202k |
| filename | string | lengths 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |
tumbleweed91/todo | 15,015,205,706,656 | 517eb248a7af0ff53509d239db030acf39b67419 | 4b9b05025f79fcf99217474a02c8b0ffd9416f30 | /general/models.py | 03b492532684f8f09b23bb2c453740b18176f988 | [] | no_license | https://github.com/tumbleweed91/todo | c890004accbe2831c3257fcad5d98b980f9a1b1f | d1776e73124d9d2e78ef85e606a0474644689017 | refs/heads/master | 2016-09-22T16:59:41.694810 | 2016-06-03T22:38:43 | 2016-06-03T22:38:43 | 60,289,192 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import unicode_literals

from django.db import models


class ToDoEntry(models.Model):
    text = models.CharField(max_length=25)
    done = models.BooleanField(default=False)
    # auto_now_add stamps the row once at creation; auto_now would overwrite it on every save
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.text
| UTF-8 | Python | false | false | 298 | py | 6 | models.py | 5 | 0.708054 | 0.701342 | 0 | 12 | 23.833333 | 52 |
IBM-Security/ibmsecurity | 12,360,915,905,554 | 21423e7ef9105da8da44d74385cb0eb92fac7383 | 33b3d332deafae90bff495ae7caffc98f331089b | /ibmsecurity/isam/web/reverse_proxy/federation_configuration.py | 42dcef5c77c2bba66effa7b0c705af2271f85ca2 | ["Apache-2.0"] | permissive | https://github.com/IBM-Security/ibmsecurity | 5698e8f52db6e04f759d67fc3ecdd976de233fa4 | 66f7673c5757273eb2afa4d1734e35efbd6f3fc0 | refs/heads/master | 2023-08-11T09:22:38.009427 | 2023-07-06T11:54:32 | 2023-07-06T11:54:32 | 85,416,486 | 50 | 96 | Apache-2.0 | false | 2023-07-06T11:54:33 | 2017-03-18T16:40:53 | 2023-06-23T07:40:23 | 2023-07-06T11:54:32 | 1,161 | 44 | 75 | 32 | Python | false | false |
import logging
import ibmsecurity.isam.fed.federations
logger = logging.getLogger(__name__)
def config(isamAppliance, instance_id, federation_id=None, federation_name=None, hostname='127.0.0.1', port='443', username='easuser',
password='passw0rd', reuse_certs=False, reuse_acls=False, check_mode=False, force=False):
"""
Federation configuration for a reverse proxy instance
:param isamAppliance:
:param instance_id:
:param federation_id:
:param federation_name:
:param hostname:
:param port:
:param username:
:param password:
:param reuse_certs:
:param reuse_acls:
:param check_mode:
:param force:
:return:
"""
if federation_name is not None:
ret_obj = ibmsecurity.isam.fed.federations.search(isamAppliance, name=federation_name, check_mode=check_mode,
force=force)
federation_id = ret_obj['data']
if federation_id == {}:
logger.info("Federation {0}, not found. Skipping config.".format(federation_name))
return isamAppliance.create_return_object()
if federation_id is None:
logger.info("Required parameter federation_id missing. Skipping config.")
return isamAppliance.create_return_object()
if force is True or _check(isamAppliance, instance_id, federation_id) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Federation configuration for a reverse proxy instance",
"/wga/reverseproxy/{0}/fed_config".format(instance_id),
{
"runtime": {
"hostname": hostname,
"port": port,
"username": username,
"password": password
},
"federation_id": federation_id,
"reuse_certs": reuse_certs,
"reuse_acls": reuse_acls
})
return isamAppliance.create_return_object()
def unconfig(isamAppliance, instance_id, federation_id=None, federation_name=None, check_mode=False, force=False):
"""
Federation unconfiguration for a reverse proxy instance
:param isamAppliance:
:param instance_id:
:param federation_id:
:param federation_name:
:param check_mode:
:param force:
:return:
"""
if federation_name is not None:
ret_obj = ibmsecurity.isam.fed.federations.search(isamAppliance, name=federation_name, check_mode=check_mode,
force=force)
federation_id = ret_obj['data']
        if federation_id == {}:
            logger.info("Federation {0} not found. Skipping unconfig.".format(federation_name))
            return isamAppliance.create_return_object()
    if federation_id is None:
        logger.info("Required parameter federation_id missing. Skipping unconfig.")
        return isamAppliance.create_return_object()
if force is True or _check(isamAppliance, instance_id, federation_id) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Federation unconfiguration for a reverse proxy instance",
"/wga/reverseproxy/{0}/fed_config/{1}".format(instance_id, federation_id))
return isamAppliance.create_return_object()
def _check(isamappliance, instance_id, federation_id):
# WebSEAL has a stanza that should contain the configured federations
from ibmsecurity.isam.web.reverse_proxy.configuration import entry
ret_obj = entry.get_all(isamappliance, reverseproxy_id=instance_id, stanza_id="isam-fed-autocfg")
    # If there is any exception (i.e. the stanza was not found), fall through and return False
    try:
        if federation_id in ret_obj['data']:
            logger.info("federation_id {0} found in reverse_proxy stanza isam-fed-autocfg.".format(federation_id))
            return True
    except Exception:
        pass
return False
| UTF-8 | Python | false | false | 4,206 | py | 303 | federation_configuration.py | 300 | 0.617689 | 0.613885 | 0 | 112 | 36.553571 | 134 |
jmslagle/nagplugins | 7,524,782,731,945 | 4de818fb99237702cfd870e3c150b3315b65acbb | ed4988957c35a237423bb534ebced640fb9f4aeb | /check_pcp_node_status.py | 98abe362b25e963332eb9a8c75d1bc0e2db0b411 | [] | no_license | https://github.com/jmslagle/nagplugins | 0688954eb0e7421a24750580af601cd1e68aaaaa | 41c62858c2fc802c548f4a83529f5a38b994b6c1 | refs/heads/master | 2016-09-06T06:45:18.361687 | 2012-06-06T19:35:33 | 2012-06-06T19:35:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# check_pcp_node_status - check PGPool node status
#
# Python Nagios Template from
# http://bsd.dischaos.com/2009/04/29/nagios-plugin-template-in-python/
import sys, getopt, subprocess
nagios_codes = {'OK': 0,
'WARNING': 1,
'CRITICAL': 2,
'UNKNOWN': 3,
'DEPENDENT': 4}
def usage():
""" returns nagios status UNKNOWN with
a one line usage description
usage() calls nagios_return()
"""
nagios_return('UNKNOWN',
"usage: %s -H host -P port -u user -p pass [-n node]" % sys.argv[0])
def nagios_return(code, response):
""" prints the response message
and exits the script with one
of the defined exit codes
DOES NOT RETURN
"""
print code + ": " + response
sys.exit(nagios_codes[code])
def do_nodeinfo_check(host, port, user, pwd, node):
retcode=0
args = ["pcp_node_info", "30", host, port, user, pwd, node]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE)
except OSError:
retcode=-1
s = 0
w = 0
if (retcode!=-1):
p.wait()
retcode = p.returncode
out = p.communicate()[0].strip()
ov = out.split()
s = ov[2]
w = ov[3]
return [retcode, s, w]
def get_node_count(host, port, user, pwd):
retcode=0
args = ["pcp_node_count", "30", host, port, user, pwd]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE)
except OSError:
retcode=-1
c=0
if (retcode!=-1):
p.wait()
retcode = p.returncode
out = p.communicate()[0].strip()
c=int(out)
return [retcode,c]
def check_condition(host, port, user, pwd, node):
ret={}
if (node!=None):
(retcode, status, weight) = do_nodeinfo_check(host, port, user, pwd, node)
if (retcode!=0):
ret={"code": "UNKNOWN", "message": "pcp_node_info returned non zero"}
else:
if (status == '2'):
ret={"code": "OK", "message": "Node Status OK - Weight %s" % weight}
elif (status == '3'):
ret={"code": "CRITICAL", "message": "Node Status CRITICAL - Node down"}
else:
ret={"code": "UNKNOWN", "message": "Node Status Unknown - %s" % status}
else:
upnodes=[]
downnodes=[]
(r,count)=get_node_count(host, port, user, pwd)
if (r==-1):
ret={"code": "UNKNOWN", "message": "Error running pcp_node_count"}
for i in range(count):
(retcode, status, weight) = do_nodeinfo_check(host, port, user, pwd, str(i))
if (retcode!=0):
ret={"code": "UNKNOWN", "message": "pcp_node_info returned non zero"}
break
if (status=='2'):
upnodes.append([i,status])
else:
downnodes.append([i,status])
if (ret.has_key("code")):
return ret # Ug I hate returning in more than 1 place in a function
if (len(downnodes)==0):
ret={"code": "OK", "message": "%d nodes up" % count}
else:
m = ""
for n in downnodes:
m = m + "Node: %s, Status: %s " % (n[0], n[1])
ret={"code": "CRITICAL", "message": m}
return ret
def main():
host=None
port=None
user=None
pwd=None
node=None
if len(sys.argv) < 2:
usage()
try:
opts, args = getopt.getopt(sys.argv[1:], "H:P:u:p:n:")
except getopt.GetoptError, err:
usage()
for o, value in opts:
if (o == "-H"):
host = value
elif (o == "-P"):
port = value
elif (o == "-u"):
user = value
elif (o == "-p"):
pwd = value
elif (o == "-n"):
node = value
else:
usage()
if (host == None or port == None or user == None or pwd == None):
usage()
result = check_condition(host, port, user, pwd, node)
nagios_return(result['code'], result['message'])
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,676 | py | 2 | check_pcp_node_status.py | 2 | 0.569369 | 0.557671 | 0 | 142 | 24.887324 | 82 |
HermanSun/Python-Test | 16,097,537,425,498 | 84ece52f0060d6faafcb7d27631744d16690caf0 | ad287c40a1a6f967b9daafd701ade9f0ff19ba3b | /de.vogella.python.second/src/p3.py | 9ed00bc92a6093cae877a94d14a7e4a1743293fd | [] | no_license | https://github.com/HermanSun/Python-Test | bfd856ce0c8880d11844d21a8b850629f5117381 | 3bb4cd88fc91a8f450eaf33c0fa76dfad85692c4 | refs/heads/master | 2020-03-26T12:46:30.139339 | 2018-08-15T22:18:33 | 2018-08-15T22:18:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Sep 23, 2016
@author: HermanSun
'''
def list_mul(list1, list2):
    # Polynomial-style product: rlist[k] is the sum of list1[i1]*list2[i2] over i1+i2 == k.
    # The original appended every partial product instead of accumulating terms that share
    # an index, which dropped the cross terms; accumulate into a zero-filled result instead.
    a = len(list1) + len(list2) - 1
    rlist = [0] * a
    for i2 in range(len(list2)):
        for i1 in range(len(list1)):
            rlist[i1 + i2] += list1[i1] * list2[i2]
    return rlist
'''
a1 = [1,3,5]
a2 = [2,4]
print(list_mul(a1, a2))  # [2, 10, 22, 20]
'''
| UTF-8 | Python | false | false | 650 | py | 23 | p3.py | 21 | 0.410769 | 0.361538 | 0 | 32 | 18.375 | 46 |
AmishaAgrawal-code0103/api_project17 | 11,407,433,170,696 | 263788edfa5c169f49319369ac5f0b4272dbbb71 | 222977ed0ba9dcc4abb7901b536678addf424090 | /Project17/demo.py | d1ae6228f9e3d88884cbf579b056b38e4b6ed1b2 | [] | no_license | https://github.com/AmishaAgrawal-code0103/api_project17 | bbe1fed11484acde8ed0faccbae6bf8034bb3efa | b1c97d97ff5b7bbb436f4eec5dc89e1e863b2127 | refs/heads/master | 2023-01-03T21:18:08.658688 | 2020-10-27T19:58:23 | 2020-10-27T19:58:23 | 307,813,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
import json
BASE_URL = 'http://127.0.0.1:8000/'
END_POINT = 'api/'
'''This function is used to select a resource; pass single=True to fetch one record by id'''
def get_resource(single=False):
    data = {}
    if single:
        # the original always prompted for an id, which broke the "select complete data" option
        id = input('Enter the id:\t')
        data = {'id': id}
    response = requests.get(BASE_URL+END_POINT, data=json.dumps(data))
    print(response.status_code)
    print(response.json())
'''This function is used to create resource'''
def create_resource():
sname = input('Enter the student name:\t')
semail = input('Enter the student Email id:\t')
sphone_no = int(input('Enter the student phone no:\t'))
saddress = input('Enter the student address:\t')
stud_data = {'sname':sname, 'semail':semail, 'sphone_no':sphone_no, 'saddress':saddress}
response = requests.post(BASE_URL+END_POINT, data = json.dumps(stud_data))
print(response.status_code)
print(response.json())
'''This function is used to update a resource'''
def update_resource():
    id = input('Enter the id:\t')
    sname = input('Enter the student name:\t')
    sphone_no = int(input('Enter the student phone no:\t'))
    saddress = input('Enter the student address:\t')
    semail = input('Enter the Student email:\t')
    update_data = {'id': id, 'sname': sname, 'semail': semail, 'sphone_no': sphone_no, 'saddress': saddress}
    # assuming the API follows REST verbs; the original POSTed the update to the same endpoint
    response = requests.put(BASE_URL+END_POINT, data=json.dumps(update_data))
    print(response.status_code)
    print(response.json())
'''This function is used to delete a resource'''
def delete_resource():
    id = input('Enter the id:\t')
    data = {'id': id}
    # assuming the API follows REST verbs; the original POSTed the delete to the same endpoint
    response = requests.delete(BASE_URL+END_POINT, data=json.dumps(data))
    print(response.status_code)
    print(response.json())
while True:
print('select your option:\n 1-select single data\n 2-create data\n 3-update data\n 4-delete data\n 5-select complete data\n')
    '''option for selecting the operation to perform'''
    option = input('Enter your option no:\t')
    if option == '1':
        get_resource(single=True)
    elif option == '2':
        create_resource()
    elif option == '3':
        update_resource()
    elif option == '4':
        delete_resource()
    elif option == '5':
        get_resource()
| UTF-8 | Python | false | false | 2,090 | py | 6 | demo.py | 6 | 0.686124 | 0.676555 | 0 | 75 | 26.84 | 127 |
masa8224/MeguHome | 14,474,039,825,363 | 5017e926734f7bb7033b30c621a6a130685e8e4b | dab04bc6553c0284197ac93485501e9757d38223 | /Python/test_speak.py | e72c3d97b9ad861dd1a51a3fd4114440b36c4ef0 | [] | no_license | https://github.com/masa8224/MeguHome | 012cf974761688a7b0f6a4ce733538f1bef8cd91 | db329904c66fa6124353caaca837f2565f3aeb6e | refs/heads/master | 2021-01-23T22:19:25.999927 | 2017-09-20T12:33:54 | 2017-09-20T12:33:54 | 102,926,235 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from SpeakPython.SpeakPythonRecognizer import SpeakPythonRecognizer;
import time
import udp
#define callback function with 1 parameter (string)
def execute(s):
print s;
if "on" in s:
udp.main("on")
if "off" in s:
udp.main("off")
#creates recognition instance
#param 1 - function accepting 1 string argument - used as callback
#param 2 - appName - used to read [appName].fsg and [appName].db
recog = SpeakPythonRecognizer(execute, "megumi");
# sets the level of debug output
# 1 is the most output, 10 is the least
# default level is 3
recog.setDebug(1);
#call this to start recognizing speech
#I believe this call utilizes current thread. Please multi-thread it yourself if necessary.
stop = False
try:
while not stop:
recog.recognize();
except KeyboardInterrupt:
stop = True;
print "Interrupted.";
finally:
time.sleep(0.00001)
| UTF-8 | Python | false | false | 900 | py | 38 | test_speak.py | 31 | 0.703333 | 0.686667 | 0 | 32 | 28.125 | 91 |
tkukurin/Lab.SeparatingFactFromFiction | 16,131,897,201,255 | 66a07195e50b4347b5e124cbdfd826c607ecf0d2 | d5647b0086277fba5d159a8fe9adeca9e420eb00 | /util/featureutil.py | b9084d3a16b6ff6b922bba3f6203a6379a9d4d06 | [] | no_license | https://github.com/tkukurin/Lab.SeparatingFactFromFiction | e051d58e42a1b2b8b9f5b3a716b04a5621e970eb | c64fb8bfc84c7a4125f559ac745fcd18f149da51 | refs/heads/master | 2021-09-14T20:36:33.427485 | 2018-05-18T19:46:18 | 2018-05-18T19:46:18 | 125,760,742 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import defaultdict
def dict_features(tokens, feature_set):
ret = defaultdict(int)
for t in tokens:
if t in feature_set:
ret[t] += 1
return ret
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from sklearn.base import TransformerMixin  # TransformerMixin lives in sklearn.base, not sklearn.pipeline

from util import data_process, functions


class TokenizerTransformer(TransformerMixin):
    def __init__(self):
        self.normalizer = data_process.TweetNormalizer()
        self.regex_word_tokenizer = RegexpTokenizer(r'[@]?\w+')
        stemmer = PorterStemmer()
        stem = lambda ws: [stemmer.stem(w) for w in ws]
        # `util` itself was never imported; assuming compose comes from the project's
        # functions helper, alongside functions.const below
        self.pipeline = functions.compose(
            self.normalizer.transform,
            self.regex_word_tokenizer.tokenize,
            # 10 times faster without stemming
            # stem,
        )
        self.fit = functions.const(self)
        self.transform = self.pipeline
| UTF-8 | Python | false | false | 948 | py | 39 | featureutil.py | 22 | 0.644515 | 0.64135 | 0 | 32 | 29.59375 | 63 |
comedxd/Artificial_Intelligence | 13,314,398,654,179 | 4f32d0b2293ff8535a870cd9730528ecf4874190 | 408d5ebecf0964eb2609ee787036f41b1966c708 | /2_DoublyLinkedList.py | f38a1a7bf7083c8fb83dbf60dc2e2eb0b7112682 | [] | no_license | https://github.com/comedxd/Artificial_Intelligence | 414688e1ea25649c000e3abf40bf97ef5fce1ada | 667c7fdf1c055b7130c6e289c643a5b4adf8f3af | refs/heads/main | 2023-04-16T02:41:43.195075 | 2021-04-01T12:50:36 | 2021-04-01T12:50:36 | 351,805,360 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class LinkedListNode:
def __init__(self,value,prevnode=None,nextnode=None):
self.prevnode=prevnode
self.value=value
self.nextnode=nextnode
def TraverseListForward(self):
current_node = self
while True:
print(current_node.value, "-", end=" ")
if (current_node.nextnode is None):
print("None")
break
current_node = current_node.nextnode
def TraverseListBackward(self):
current_node = self
while True:
print(current_node.value, "-", end=" ")
if (current_node.prevnode is None):
print("None")
break
current_node = current_node.prevnode
# driver code
if __name__=="__main__":
node1=LinkedListNode("Hello ")
node2=LinkedListNode("Dear ")
node3=LinkedListNode(" AI ")
node4=LinkedListNode("Student")
head=node1
tail=node4
# forward linking
node1.nextnode=node2
node2.nextnode=node3
node3.nextnode=node4
# backward linking
node4.prevnode=node3
node3.prevnode=node2
node2.prevnode=node1
head.TraverseListForward()
tail.TraverseListBackward()
| UTF-8 | Python | false | false | 1,266 | py | 28 | 2_DoublyLinkedList.py | 27 | 0.56872 | 0.554502 | 0 | 45 | 25.822222 | 57 |
Duchyna1/PYTHON | 15,032,385,555,176 | ceafe5a5a1224a1b8e4cb0627635c2b777283dbf | 602cb983fb198464ca7aa1e716e7c92d60d30ab6 | /projects/raspberrypi/push.py | c6b014d0dbb94838bc74a33ce3c219b4c3face2b | [] | no_license | https://github.com/Duchyna1/PYTHON | 2b5c8375123bbc27ab484eb73c90b9b9c00081ea | 6b189b6145e0ff32fc43285cbcf39e3479bb44c2 | refs/heads/master | 2022-11-14T13:49:58.398229 | 2020-06-22T15:01:28 | 2020-06-22T15:01:28 | 238,493,189 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
message = input("Commit message: ")
os.system('git commit -am "{}"'.format(message))
os.system('git push')
| UTF-8 | Python | false | false | 119 | py | 93 | push.py | 89 | 0.672269 | 0.672269 | 0 | 6 | 18.833333 | 48 |
andrirahmanto/DistanceTransformForWatershed | 5,025,111,750,831 | 6acfcfc16caabc12965e052415d2bf5c8030afab | cebd4e4ce7d93a2267d67042b3690ccaa7759b80 | /DistanceTranfrom_byCityBlock.py | 2c5727818c160da7447b2eb9ce039729fba19302 | [] | no_license | https://github.com/andrirahmanto/DistanceTransformForWatershed | 84a7a59f38b3cc9e5be3d2f37e1941cb2aca2463 | 358c624a318dccda3f2279cf6e889d9c66d55673 | refs/heads/master | 2020-11-28T01:54:41.456760 | 2019-12-23T08:04:33 | 2019-12-23T08:04:33 | 229,673,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def city_block(lis):
    matrix = np.array(lis)
    # store the indices of foreground pixels (pixels with a non-zero value) in saveindex
    saveindex = []
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            if matrix[i,j] != 0:
                saveindex.append((i,j))
    # build a result matrix containing 0 in every element
    hasil = np.zeros((len(lis),len(lis[0])))
    # loop over every pixel (including the foreground pixels)
    for i1 in range(len(hasil)):
        for j1 in range(len(hasil[0])):
            lis = []
            for i2,j2 in saveindex:
                # city-block distance from this pixel to each foreground pixel
                jarak = abs(i1-i2) + abs(j1-j2)
                lis.append(jarak)
            # keep the minimum distance, i.e. to the nearest foreground pixel
            hasil[i1,j1] = min(lis)
    return hasil
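# Note (added): this brute-force transform is O(P*F) for P pixels and F foreground
# pixels; fine for the 5x5 demo below, slow for real images.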
lis = [[0,0,0,0,0],
[0,0,1,0,0],
[0,1,0,1,0],
[0,0,1,0,0],
[0,0,0,0,0],]
hasil = city_block(lis)
print(hasil)
f,(x,y) = plt.subplots(1,2)
x.imshow(lis, 'gray')
y.imshow(hasil, 'gray')
plt.show()
| UTF-8 | Python | false | false | 1,189 | py | 2 | DistanceTranfrom_byCityBlock.py | 1 | 0.544996 | 0.510513 | 0 | 42 | 26.309524 | 87 |
paulberesuita/linearalgebra | 979,252,577,440 | ffac0148c3a7eea6909e6dbc6add30e76486c5ef | 914a9eb9e8374414516a72ac80ba9529d551a061 | /hello.py | cd481c0c101221e3394363e98232d6c5c587e0ff | [] | no_license | https://github.com/paulberesuita/linearalgebra | cd7af9f32b16e45b2d97d57940c4b35b0cf5607f | e09b6cc6d7649c6a70d5c4ddadca2040e5138ec7 | refs/heads/master | 2016-08-09T14:22:23.562694 | 2016-01-17T19:16:55 | 2016-01-17T19:16:55 | 49,831,388 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
class Vector(object):
def __init__(self, coordinates):
try:
if not coordinates:
raise ValueError
self.coordinates = tuple(coordinates)
self.dimension = len(coordinates)
except ValueError:
raise ValueError('The coordinates must be nonempty')
except TypeError:
raise TypeError('The coordinates must be an iterable')
def __str__(self):
return 'Vector: {}'.format(self.coordinates)
def __eq__(self, v):
return self.coordinates == v.coordinates
def main():
#TESTING PLUS, MINUS, AND TIME SCALAR FUNCTIONSS
# my_vector = Vector([1,2,3])
# my_vector2 = Vector([2,3,4])
# print addTwoVectors(my_vector, my_vector2)
# my_vectorPlus = plus(Vector([8.218,-9.341]), Vector([-1.129,2.111]))
# my_vectorMinus = minus(Vector([7.119,8.215]), Vector([-8.223,0.878]))
# my_vectorScalar = times_scalar(Vector([1.671,-1.012, -0.318]), 7.41)
# print my_vectorPlus
# print my_vectorMinus
# print my_vectorScalar
#TESTING MAGNITUDE AND NORMALIZE FUNCTIONS
# result1 = magnitude(Vector([-0.221, 7.437]))
# result2 = magnitude(Vector([8.813, -1.331, -6.247]))
# result3 = normalize(Vector([5.581, -2.136]))
# result4 = normalize(Vector([1.996, 3.108, -4.554]))
# print Vector([-0.221, 7.437]), "magnitude is", result1
# print Vector([8.813, -1.331, -6.247]), "magnitude is", result2
# print Vector([5.581, -2.136]), "normalize is", result3
# print Vector([1.996, 3.108, -4.554]), "normalize is", result4
#TESTING DOT PRODUCT AND ANGLE/RADIANS CALCULATION
# result1 = dotProduct(Vector([-5.955, -4.904, -1.874]), Vector([-4.496, -8.755, 7.103]))
# result1 = radiansTwoVectors(Vector([3.183, -7.627]), Vector([-2.668, 5.319]))
result1 = angleTwoVectors(Vector([7.35, 0.221, 5.188]), Vector([2.751, 8.259, 3.985]))
# result2 = angleTwoVectors(Vector([1, 2, -1]), Vector([3, 1, 0]))
# result3 = radiansTwoVectors(Vector([1, 2, -1]), Vector([3, 1, 0]))
print "dot product", result1
# print "angle", result2
# print "radian", result3
def plus(v1, v2):
# print v1.coordinates
# print v2.coordinates
# print zip(v1.coordinates, v2.coordinates)
new_coordinates = [x+y for x,y in zip(v1.coordinates, v2.coordinates)]
# print new_coordinates
return Vector(new_coordinates)
def plus2(v1, v2):
new_coordinates = []
n = len(v1.coordinates)
for i in range(n):
new_coordinates.append(v1.coordinates[i] + v2.coordinates[i])
return Vector(new_coordinates)
def minus(v1, v2):
new_coordinates = [x-y for x,y in zip(v1.coordinates, v2.coordinates)]
return Vector(new_coordinates)
def times_scalar(v, c):
# print "Entering times_scalar function"
# print v
# print c
new_coordinates = [c*x for x in v.coordinates]
return Vector(new_coordinates)
def magnitude(v):
magnitude = 0
# print "vector", v
# print "length", len(v.coordinates)
for i in range(len(v.coordinates)):
# print "v.coordinates[i]", v.coordinates[i]
current = v.coordinates[i] * v.coordinates[i]
# print "current:", current
magnitude = magnitude + current
return math.sqrt(magnitude)
def magnitude2(v):
    coordinates_squared = [x**2 for x in v.coordinates]  # x*2 doubled each value instead of squaring it
    return math.sqrt(sum(coordinates_squared))
def normalize(v):
new_coordinates = []
for i in range(len(v.coordinates)):
new_coordinates.append(v.coordinates[i]/magnitude(v))
return Vector(new_coordinates)
def normalize2(v):
    try:
        mag = magnitude(v)  # must not reuse the name 'magnitude', or the call raises UnboundLocalError
        return times_scalar(v, 1/mag)
    except ZeroDivisionError:
        raise Exception("Cannot normalize the zero vector")
def dotProduct(v1, v2):
new_coordinates = [x*y for x,y in zip(v1.coordinates, v2.coordinates)]
sum = 0
for i in range(len(new_coordinates)):
sum = sum + new_coordinates[i]
return sum
def radiansTwoVectors(v1, v2):
dotProductResult = dotProduct(v1, v2)
angle = math.acos(dotProductResult/(magnitude(v1)*magnitude(v2)))
return angle
def angleTwoVectors(v1, v2):
radian = radiansTwoVectors(v1, v2)
return math.degrees(radian)
main()
| UTF-8 | Python | false | false | 4,380 | py | 1 | hello.py | 1 | 0.619178 | 0.558904 | 0 | 137 | 30.978102 | 93 |
victorhcunha/ExerciciosPython | 13,134,010,030,708 | 59128cf1d2bfdd41b7c62722f8c859c4ce4515ad | 763d0b857dd8316a6a2c0af97be58cbc6c747987 | /Programação 1/monitoria3.py | ad0a1b3873f3253ece2068e2b30d484adc4bbdf3 | [] | no_license | https://github.com/victorhcunha/ExerciciosPython | af60b737b5c9c8cadd93f9f02de171653c46eda7 | ba4f6b2324f62de85bb6d00b5263f6324e8ce667 | refs/heads/master | 2020-03-28T17:33:34.026276 | 2019-04-18T18:11:15 | 2019-04-18T18:11:15 | 148,801,374 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Print the integers strictly between the two inputs
n = int(input())
m = int(input())
if n < m:
for i in range(n+1,m):
print(i)
elif m < n:
for i in range(m+1,n):
print(i)
else:
print ("Iguais")
| UTF-8 | Python | false | false | 197 | py | 31 | monitoria3.py | 26 | 0.507614 | 0.497462 | 0 | 15 | 12.133333 | 26 |
tshrinivasan/tools-for-wiki | 11,261,404,297,454 | 0a2a9a914177944d3e807a54c34c80d0bd26c2f8 | f47655ddc23484f4804b0d654b23606c3bd53ac1 | /pdf-upload-commons/pdf-djvu-uploader-commons.py | 952f0b3091f44f92042094b375ac4a69399085eb | [] | no_license | https://github.com/tshrinivasan/tools-for-wiki | 51cb8b7744340f1bf610660b43d8430ef9d15a1d | 6fb975abd1a8edcb6d34461f0a650defd2e11ee6 | refs/heads/master | 2023-01-12T11:59:46.032571 | 2022-07-16T05:23:24 | 2022-07-16T05:23:24 | 48,401,880 | 6 | 11 | null | false | 2022-12-27T16:55:20 | 2015-12-22T00:44:53 | 2022-07-04T16:45:45 | 2022-12-27T16:55:19 | 478 | 6 | 9 | 25 | Python | false | false |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wikitools
import poster
import pyexiv2
import os
import shutil
import sys
import time
import datetime
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
#wiki_url = "MediaWiki API url here"
wiki_url = "https://commons.wikimedia.org/w/api.php"
wiki_username = ""
wiki_password = ""
try:
wiki = wikitools.wiki.Wiki(wiki_url)
except:
print "Can not connect with wiki. Check the URL"
login_result = wiki.login(username=wiki_username,password=wiki_password)
#print "login status = " + str(login_result)
if login_result == True:
print "Logged in."
else:
print "Invalid username or password error"
sys.exit()
path = './'
listing = os.listdir(path)
def filetype(file):
return file.split(".")[-1]
def filename(file):
return file.split(".")[-2]
def move_file(pdf_file):
source = pdf_file
destination = "./uploaded-"+ timestamp + "/" + pdf_file
if os.path.isdir("uploaded-" + timestamp):
shutil.move(source,destination)
else:
os.mkdir("uploaded-" + timestamp)
shutil.move(source,destination)
print "Moving the file " + pdf_file + " to the folder 'uploaded-" + timestamp + "'"
def upload_pdf_file(pdf_file):
print "Uploading the file " + pdf_file
file_name = pdf_file
caption = pdf_file
# extension = filetype(image)
# upload_file_name = file_name + "." + extension
file_object=open(pdf_file,"r")
pdf=wikitools.wikifile.File(wiki=wiki, title=file_name)
pdf.upload(fileobj=file_object,comment=caption, ignorewarnings=True)
page_name = pdf_file.replace(" ","_")
page = wikitools.Page(wiki, "File:" + page_name , followRedir=True)
wikidata_part1 = """
=={{int:filedesc}}==
{{Book
| Author =
| Editor =
| Translator =
| Illustrator =
"""
wikidata_part2 = """
| Subtitle =
| Series title =
| Volume =
| Edition =
| Publisher =
| Printer =
| Date =
| City =
| Language = தமிழ்
| Description = {{ta|1=தமிழக அரசால் அறிவிக்கப்பட்ட நாட்டுடைமை நூல்களில் இதுவும் ஒன்று.}}
| Source = {{Institution:Tamil Virtual Academy}}
| Image = {{PAGENAME}}
| Image page =
| Permission = [[File:Tamil-Nadu-Nationalized-Books-Public-Domain-Declaration.jpg|thumb|left|Letter from Tamil Virtual Academy declaring books nationalized by Tamil Nadu government and released under Creative Commons CC0 1.0 Universal Public Domain Dedication license]]
| Other versions =
| Wikisource =s:ta:Index:{{PAGENAME}}
| Homecat =
}}
=={{int:license-header}}==
{{cc-zero}}
[[Category:Books in Tamil]]
[[Category:PDF files in Tamil with CC0 1.0 license]]
[[Category:Books from Tamil Virtual Academy]]
"""
wikidata = wikidata_part1 + "| Title = " + filename(pdf_file).replace("_"," ") + wikidata_part2
page.edit(text=wikidata)
print "File URL = " + wiki_url.split('/w')[0] + "/wiki/File:" + page_name
move_file(pdf_file)
for pdf_file in listing:
if filetype(pdf_file) in ['pdf','PDF','djvu','DJVU']:
upload_pdf_file(pdf_file)
| UTF-8 | Python | false | false | 3,291 | py | 51 | pdf-djvu-uploader-commons.py | 21 | 0.630167 | 0.625118 | 0 | 142 | 21.274648 | 271 |
Wytamma/Rosalind | 10,960,756,547,923 | 10c239bf600ba5aa7b2e7ccc6a01c4b9f16cb6ad | 8cfb139967d5f60072469158e2ed696a93a6d087 | /bioinformatics_stronghold/edit.py | 6b1d5e122dfe4c8d2659c167d9b98d0db3588e88 | ["MIT"] | permissive | https://github.com/Wytamma/Rosalind | 53efb19692b9271fc605a7e0f07df7b49642df20 | d3ffecc324f806df79c43bc9f89128b5d2a57dfb | refs/heads/master | 2023-04-14T15:53:29.051286 | 2021-04-26T06:48:54 | 2021-04-26T06:48:54 | 264,836,033 | 0 | 0 | MIT | false | 2021-04-26T06:48:55 | 2020-05-18T05:27:38 | 2021-02-28T06:22:47 | 2021-04-26T06:48:55 | 177 | 0 | 0 | 0 | Python | false | false |
from pyoinformatics import read_fasta
from pyoinformatics.align import matrix
SAMPLE_DATASET = """>Rosalind_39
PLEASANTLY
>Rosalind_11
MEANLY"""
SAMPLE_OUTPUT = 5
def solution(dataset: list) -> str:
fasta = read_fasta(lines=dataset)
Seq1, Seq2 = fasta[0], fasta[1]
w, h = len(Seq1), len(Seq2)
    # create an empty matrix
M = [[0 for x in range(w + 1)] for y in range(h + 1)]
# pad the first row and col with edit distances from empty strings
# the distance from '' to HAT is 3 (insertions)
# C A T
# 0 1 2 3
# H 1 0 0 0
# A 2 0 0 0
# T 3 0 0 0
M[0] = [i for i in range(len(Seq1)+1)]
for j in range(len(M)):
M[j][0] = j
# calculate the edit distance from the start to each cell
# i.e. the number of indels or mutations to get there
# the distance from H to C is 1 (mutation)
# C A T
# 0 1 2 3
# H 1 1 0 0
# A 2 0 0 0
# T 3 0 0 0
# the distance from H to CA is 2 (1 del 1 mutation)
# C A T
# 0 1 2 3
# H 1 1 2 0
# A 2 0 0 0
# T 3 0 0 0
    # because A and A match, there is no extra edit distance:
    # the distance from CA to HA equals the distance from C to H,
    # so reuse the diagonal (previous) edit distance
# C A T
# 0 1 2 3
# H 1 1 2 3
# A 2 2 1
# T 3
for i, ib in enumerate(Seq1, 1):
for j, jb in enumerate(Seq2, 1):
if ib == jb:
edit = M[j-1][i-1]
else:
edit = min(
M[j -1][i],
M[j][i-1],
M[j-1][i-1]) + 1
M[j][i] = edit
# M[-1][-1] is the total edit distance from the ends
return M[-1][-1]
def test_solution():
assert solution(SAMPLE_DATASET.splitlines(True)) == SAMPLE_OUTPUT
| UTF-8 | Python | false | false | 1,777 | py | 38 | edit.py | 25 | 0.521666 | 0.466517 | 0 | 70 | 24.385714 | 70 |
AshwiniReddyChalla/TransferLearningForIntentClassification | 8,366,596,344,202 | 6b9d8fc30c4fc2b04d6fd5fb139f85db6d702b5a | e0f6dfce0e7b0c92dbeabb26bb48a803c53ecdf9 | /intent_classification/softmax_regression/train.py | b48db430d00a79e2e1982bbbf117d0ce88d1fd76 | [] | no_license | https://github.com/AshwiniReddyChalla/TransferLearningForIntentClassification | 4765212e44879f1ec4476569a0c619f65a273332 | 0752d099372a38732b151ac67ffad804d876044f | refs/heads/master | 2021-09-03T07:31:48.493456 | 2018-01-07T03:12:35 | 2018-01-07T03:12:35 | 108,052,925 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorflow as tf
import sys
sys.path.append("../")
import atis_data
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("data_dir", '../../ATIS', "Data directory")
tf.app.flags.DEFINE_integer("in_vocab_size", 10000, "max vocab Size.")
tf.app.flags.DEFINE_string("max_in_seq_len", 20, "max in seq length")
tf.app.flags.DEFINE_integer("max_data_size", 4000, "max training data size")
tf.app.flags.DEFINE_integer("batch_size", 500, "batch size")
tf.app.flags.DEFINE_integer("iterations", 5000, "number of iterations")
tf.app.flags.DEFINE_integer("embedding_size", 25, "size of embedding")
def train():
atis = atis_data.AtisData(FLAGS.data_dir, FLAGS.in_vocab_size,
FLAGS.max_in_seq_len, FLAGS.max_data_size, FLAGS.embedding_size)
number_of_labels = atis.get_number_of_labels()
x = tf.placeholder(tf.float32, [None, FLAGS.max_in_seq_len*FLAGS.embedding_size])
W = tf.Variable(tf.zeros([FLAGS.embedding_size*FLAGS.max_in_seq_len, number_of_labels]))
b = tf.Variable(tf.zeros([number_of_labels]))
y_ = tf.placeholder(tf.float32, [None, number_of_labels])
y = tf.matmul(x, W)+b
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(FLAGS.iterations):
batch_xs, batch_ys = atis.get_next_batch(FLAGS.batch_size)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_x, test_y = atis.get_test_data()
print(sess.run(accuracy, feed_dict={x: test_x, y_: test_y}))
def main(_):
train()
if __name__ == "__main__":
tf.app.run()
| UTF-8 | Python | false | false | 1,787 | py | 10 | train.py | 10 | 0.705092 | 0.688304 | 0 | 45 | 38.733333 | 89 |
guocongyun/basic-ml-projects | 18,399,639,915,957 | 84eeb9ba7313aeeb224f9cd8397c58dab3bfec0f | d702fd53c02117e8d41b8fddf8dcffb977305614 | /project2_heoffding_inequality.py | 1aa7a542cc07127ea93ebf3b7f421ba59b9a60f5 | [] | no_license | https://github.com/guocongyun/basic-ml-projects | 4c26ea96a388078baaf10683f5eb787a591cb0df | 259f6c224b75594591083eb3c8a0af939cd47f1d | refs/heads/master | 2023-04-22T02:16:21.019808 | 2021-05-07T08:37:55 | 2021-05-07T08:37:55 | 304,696,359 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
class coin:
def __init__(self):
self.head = 0
self.tail = 0
def flip(self):
if (np.random.randint(0,2,dtype=int) == 1): self.head += 1
# randint lower bound is inclusive, higher bound is exclusive
else: self.tail += 1
def simulation():
coin_list = np.zeros(3)
randint_ = np.random.randint(1,1000)
lowest_head_freq = 10
for num in range(1000):
coin_ = coin()
for _ in range(10):
coin_.flip()
# if (num == 1):
# coin_list[0] = coin_.head
# elif (num == randint_):
# coin_list[1] = coin_.head
if (coin_.head < lowest_head_freq):
lowest_head_freq = coin_.head
coin_list[2] = lowest_head_freq
return coin_list
def main():
dataset = []
for _ in range(100):
coin_list = simulation()
dataset.extend(coin_list)
# print(_)
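    # Note (added): 100 simulations x 10 flips each, so dividing the summed head
    # counts by 1000 yields the average head *fraction* of the minimum-frequency coin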
print(sum(dataset)/1000)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 992 | py | 16 | project2_heoffding_inequality.py | 15 | 0.52621 | 0.493952 | 0 | 43 | 22.093023 | 69 |
threkk/domino | 19,241,453,511,900 | 7cec0f0961ace3ad0facb3e0acca66751965c541 | 92978a00f2f767000cabaa092396e9192022f9b6 | /main.py | 42c1ceae59ba9d034bd5f1df3f4b1b77be918e9d | [] | no_license | https://github.com/threkk/domino | 8d18cced052859d4c8e71f47eed566503a600576 | 9c265307b8fe90d208ddf467b83aec26962b19cf | refs/heads/master | 2021-04-06T10:54:15.997363 | 2018-03-13T01:09:09 | 2018-03-13T01:09:09 | 124,470,231 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import argparse
from domino.game import Game
from domino.player import Player
def start_game(player_names):
"""
Starts a game with the given player names. It creates the players, draws 7
tiles for each and starts the game.
:param player_names: Name of the players to play the game.
"""
players = [Player(name) for name in player_names]
game = Game(players)
for player in players:
for i in range(0, 7):
player.draw(game.pop_top())
game.run()
def cli():
"""
Creates a simulation of a dominoes game. It takes the defined players to
    play the match until one of them wins. Starting with 7 tiles in hand and
    one randomly chosen on the board, the players alternately place one tile
    on the board by matching the free numbers with their own tiles. If a player
    cannot play a tile, the player will draw tiles until it is able to play a
    tile. In case there are no more tiles to play, the player will skip its
    turn. The first player to empty its hand wins. If, given a board state, no
    player can place a tile nor draw, the game finishes with a tie.
"""
parser = argparse.ArgumentParser(prog='domino', description=cli.__doc__)
parser.add_argument('players', metavar='<players>', type=str, nargs='+',
help='Players who will be part of the emulation.')
args = parser.parse_args()
start_game(args.players)
if __name__ == '__main__':
cli()
| UTF-8 | Python | false | false | 1,473 | py | 12 | main.py | 11 | 0.671419 | 0.668703 | 0 | 43 | 34.255814 | 79 |
iangelmx/big-data-visualization | 19,310,173,006,932 | 610d852cba6cdb2448b0e82b3cae85f9d9f7e32a | a144f1b97b2a9d1ffdd5ea1f4eb52dd79986987b | /extracting_fb_insights/extracting_data_fb_simple.py | ab7913f0a3cdecc7fd24354c830e50763e4fcc70 | ["MIT"] | permissive | https://github.com/iangelmx/big-data-visualization | 6bd08cf7c166438b618a3de6af2ac8531bbf78bb | 75d6cacc8dd33ec372af78f719e6cc31fe30d97d | refs/heads/master | 2022-11-21T08:19:05.696340 | 2020-07-25T02:31:02 | 2020-07-25T02:31:02 | 273,782,171 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Script to get some analytics from a FB page and save to CSV/XLSX
This script was developed according the Visualization Big data's course in FCS.
This script helps people of marketing to get more information about the
activity in their social media page in Facebook in a free way.
To make it run you need to make an app in FB for developers, then get a TOKEN with
the permissions of page read/manage posts.
Created by: Ángel Negib Ramírez Álvarez
Github: iangelmx
Email: angel@ninjacom.space
Version: 1.3
First release: 2020-06-21
Last modification: 2020-06-23
"""
#In[1]
import facebook
import requests
import json
import pandas as pd
import datetime
#In[2]:
def get_reaction_from_post( row : pd.core.series.Series ) -> pd.core.series.Series:
    post_id = row['id']
    reactions = graph.get_connections(post_id, 'reactions', fields='pic,name,pic_large,profile_type,pic_crop,can_post,type,link,id')
    # used to page through the result set
    paging = reactions.get('paging')
    reactions = reactions['data']
    for react in reactions:
        row[ react['type'].lower() ] += 1
        row[ 'users_likes' ].append( (react['name'], ) )
    return row
#In[3]
settings = json.loads(open("settings.json").read())
CANT_COMMENTS = 10
ACCESS_TOKEN = settings.get('access_token')
PAGE_ID = settings.get('id_fb_page')
graph = facebook.GraphAPI(access_token=ACCESS_TOKEN, version="3.1")
cuenta_likes = 0
lista_comments = []
flag = False
#In[4]
comments = graph.get_connections(PAGE_ID, 'feed')
paging = comments['paging']
comments = comments['data']
comments
#In[5]:
# This block exists because running likes_post = { ... } raised an error:
# some posts have no 'message' key, i.e. publications without a text body,
# just notifications or something like that.
with_no_message = filter( lambda x: 'message' not in x, comments )
with_no_message = tuple(with_no_message)
#In[6]:
# Convert this into a tidy dictionary so we can work on it with pandas
likes_post = [
{
'id' : item['id'],
'post':item['message'] if 'message' in item else item['story'],
'users_likes':[],
'like':0,
'love':0,
'haha':0,
        'angry':0,  # the Graph API reaction type is ANGRY, so the original 'angy' key was never matched
'care':0,
'wow':0,
'sad':0,
'created_time':datetime.datetime.strptime( item['created_time'], "%Y-%m-%dT%H:%M:%S+0000" )
}
for item in comments
]
likes_post
posts = pd.DataFrame.from_dict(likes_post)
posts
#In[7]:
posts_with_reactions = posts.apply( get_reaction_from_post, axis='columns' )
posts_with_reactions
#In[8]:
posts_with_reactions.to_csv( 'salida_reactions.csv' )
posts_with_reactions.to_excel( 'salida_reactions.xlsx' )
# %%
| UTF-8 | Python | false | false | 2,736 | py | 21 | extracting_data_fb_simple.py | 17 | 0.665934 | 0.650183 | 0 | 108 | 24.277778 | 132 |
frclasso/aulas_particulares_python | 3,496,103,386,223 | 5368d1372902e0ceec3f98691d377f1eb7054d18 | d02b09fb83041dbe10d34a74c152e87f08e451dc | /jessica_campos/aula3/script3.py | 0857a1648f36cb80a0d0145ed88d93a1c419ebf7 | [] | no_license | https://github.com/frclasso/aulas_particulares_python | d9d6da8127a0d02f117373572438266c67fe79fe | f877cd9b9ce2732b0cbb72922d3079e1b0d944b0 | refs/heads/main | 2023-07-29T18:15:02.452427 | 2021-09-17T22:20:01 | 2021-09-17T22:20:01 | 395,457,184 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# comparison operators
# each returns True or False
valor_1 = 100
valor_2 = 200
# equality
print(valor_1 == valor_2)
# inequality
print(valor_1 != valor_2)
# greater than
print(valor_1 > valor_2)
# less than
print(valor_1 < valor_2)
# greater than or equal
print(valor_2 >= 200.0)
# less than or equal
print(valor_1 <= 100.01)
| UTF-8 | Python | false | false | 309 | py | 55 | script3.py | 45 | 0.666667 | 0.579288 | 0 | 23 | 12.434783 | 26 |
Vitkof/MathSite_Django | 19,009,525,265,184 | 48a7e49ab0c153439e83bda2b44d47383df80853 | 4960df7d4a13528ba15f439b05fad9e5adec872b | /ctmath/apps/univers/models.py | 023d0ab65a46625167fd46bf01551011aae37768 | [] | no_license | https://github.com/Vitkof/MathSite_Django | 1cc5327223949742bb6fd570302ab0d95d0de68c | 5b149b7e417f80592b2199dcae8927f5c7096879 | refs/heads/main | 2023-02-04T17:40:31.661101 | 2020-12-27T22:00:36 | 2020-12-27T22:00:36 | 324,852,072 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class UniverModel(models.Model):
title = models.CharField(max_length=32)
location = models.CharField(max_length=32)
overall = models.FloatField()
teaching = models.FloatField()
research = models.FloatField()
citations = models.FloatField()
industry_income = models.FloatField()
international_outlook = models.FloatField()
number_students = models.PositiveIntegerField()
students_staff = models.FloatField()
percent_foreigners = models.PositiveSmallIntegerField()
gender_ratio = models.CharField(max_length=8)
def __str__(self):
return f"{self.id}_{self.title}"
| UTF-8 | Python | false | false | 670 | py | 20 | models.py | 12 | 0.691045 | 0.683582 | 0 | 18 | 35.222222 | 59 |
nguyenductinh1998py/PythonLab | 13,073,880,472,778 | 318d17c3f3db96445b570bb4fc2ccb889cb0a8d3 | 1c6a5eea04f1eddb584971d91679e0cb8ad93c16 | /Python_Lab_03/02.py | daf0fedd35f64f246b95d08749bcd3acbe50902d | [] | no_license | https://github.com/nguyenductinh1998py/PythonLab | 1dbccd9ca4f77f70acd6db2c86af3463270a91a8 | b9cf09150a7065d698e03189b8078c1c1f6e32f0 | refs/heads/main | 2023-02-08T09:28:03.435498 | 2021-01-04T14:09:35 | 2021-01-04T14:09:35 | 320,751,716 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#2d Lists
xs = [3, 1, 2]
print(xs, xs[2])
print(xs[-1])
xs[2] = 'foo'
print(xs)
xs.append('bar')
print(xs)
x = xs.pop()
print(x, xs)
nums = list(range(5))
print(nums)
print(nums[2:4])
print(nums[2:])
print(nums[:2])
print(nums[:])
print(nums[:-1])
nums[2:4] = [8, 9]
print(nums[2:4])
animals = ['cat', 'dog', 'monkey']
for animal in animals:
print(animal)
for idx, animal in enumerate(animals):
print(f'{idx + 1, animal}')
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
squares.append(x ** 2)
print(squares)
squares = [x ** 2 for x in nums]  # the comprehension variable must be x to match the expression
print(squares)
even_squares = [x ** 2 for x in nums if x % 2 == 0]
print(even_squares)
#2e Dictionaries
d = {'cat': 'cute', 'dog':'furry'}
print(d['cat'])
print('cat' in d)
d['fish'] = 'wet'
print(d['fish'])
print(d.get('monkey', 'N/A'))
print(d.get('fish', 'N/A'))
del d['fish']
print(d.get('fish', 'N/A'))
for animal in d:
legs = d[animal]
print(f'{animal}, {legs}')
for animal, legs in d.items():
print(f'{animal} , {legs}')
nums = {0, 1, 2, 3, 4}
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square)
#2f Sets
animals = {'cat', 'dog'}
print('cat' in animals)
print('fish' in animals)
animals.add('fish')
print('fish' in animals)
print(len(animals))
animals.add('cat')
print(len(animals))
animals.remove('cat')
print(len(animals))
for idx, animal in enumerate(animals):
print(f'{idx + 1}, { animal}')
from math import sqrt
nums = {int(sqrt(x)) for x in range(30)}
print(nums)
#2g Tuples
d = {(x, x + 1): x for x in range(10)}
t = (5, 6)
print(type(t))
print(d[t])
print(d[1, 2])
| UTF-8 | Python | false | false | 1,608 | py | 73 | 02.py | 21 | 0.599502 | 0.567786 | 0 | 85 | 17.929412 | 60 |
dungeonmaster51/commcare-hq | 19,499,151,538,612 | 19d6598a935d3027daddeadccc883652c7fd6b13 | 81579ecd0678d652bbb57ff97529631fcfb74b12 | /corehq/apps/case_search/exceptions.py | 8c3f1551ca85967a52652a45fefb556d9aaad0f0 | ["BSD-3-Clause"] | permissive | https://github.com/dungeonmaster51/commcare-hq | 64fece73671b03c1bca48cb9d1a58764d92796ea | 1c70ce416564efa496fb4ef6e9130c188aea0f40 | refs/heads/master | 2022-12-03T21:50:26.035495 | 2020-08-11T07:34:59 | 2020-08-11T07:34:59 | 279,546,551 | 1 | 0 | BSD-3-Clause | true | 2020-07-31T06:13:03 | 2020-07-14T09:51:32 | 2020-07-30T16:37:59 | 2020-07-31T06:03:08 | 562,669 | 1 | 0 | 0 | Python | false | false |
class CaseSearchException(Exception):
pass
class CaseSearchNotEnabledException(CaseSearchException):
pass
| UTF-8 | Python | false | false | 116 | py | 2,296 | exceptions.py | 1,577 | 0.810345 | 0.810345 | 0 | 6 | 18.333333 | 57 |
gregVader/french-accidents-nosql | 3,728,031,630,177 | 517fa3e3e758216be552c96b23e3531ece392c8f | e238f616a3c280c2e7f004c0448104ea3ed12673 | /loaders/characteristicsLoader.py | 652465bac0c753c08ba6194ce7d09f0210ca816e | [] | no_license | https://github.com/gregVader/french-accidents-nosql | c5302fbda4064db4d65f5e1f97ac862f6e9ad574 | 78cc78fb595fc7f819480f20c05fa2f49893d994 | refs/heads/main | 2023-06-06T13:15:43.153509 | 2021-06-13T22:26:38 | 2021-06-13T22:26:38 | 371,169,408 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import threading
import pandas as pd
import os.path
import json
from json import JSONDecoder
from json import JSONEncoder
from datetime import datetime
csvCharacteristics = "../dataset/characteristics.csv"
csvHolidays = "../dataset/holidays.csv"
csvPlaces = "../dataset/places.csv"
csvInsee = "../dataset/code-postal-code-insee-2015.csv"
jsonCharacteristics = "json/characteristics.json"
jsonHolidays = "json/holidays.json"
jsonPlaces = "json/places.json"
jsonInsee = "json/insee.json"
class DateTimeDecoder(JSONDecoder):
def __init__(self, *args, **kargs):
JSONDecoder.__init__(self, object_hook=self.dict_to_object,
*args, **kargs)
def dict_to_object(self, d):
if '__type__' not in d:
return d
type = d.pop('__type__')
try:
dateobj = datetime(**d)
return dateobj
except:
d['__type__'] = type
return d
class DateTimeEncoder(JSONEncoder):
""" Instead of letting the default encoder convert datetime to string,
convert datetime objects into a dict, which can be decoded by the
DateTimeDecoder
"""
def default(self, obj):
if isinstance(obj, datetime):
return {
'__type__' : 'datetime',
'year' : obj.year,
'month' : obj.month,
'day' : obj.day,
'hour' : obj.hour,
'minute' : obj.minute,
'second' : obj.second,
'microsecond' : obj.microsecond,
}
else:
return JSONEncoder.default(self, obj)
def getInsee(inseeDf):
insee = {}
insee["insee"] = inseeDf["INSEE_COM"]
insee["com"] = inseeDf["NOM_COM"]
insee["dep"] = inseeDf["NOM_DEPT"]
insee["population"] = int(inseeDf["POPULATION"])
latlong = inseeDf["Geo Point"].split(',')
insee["lat"] = float(latlong[0])
insee["long"] = float(latlong[1])
return insee
def getInseeCode(dep, com):
if com == 'nan' or dep == 'nan':
return None
if dep == '201':
insee_dep = '2A'
elif dep == '202':
insee_dep = '2B'
elif dep in ['971', '972', '973', '974', '975', '976']:
insee_dep = '97'
else:
insee_dep = dep[:-1].zfill(2)
insee_com = com.zfill(3)
return insee_dep + insee_com
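# Worked examples (added, derived from the branches above):
#   getInseeCode('750', '101') -> '75101'  (dep '750' drops the trailing digit -> '75')
#   getInseeCode('201', '4')   -> '2A004'  (Corsican departments map to '2A'/'2B')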
def getPlace(placeDf):
place = {}
# place["Num_Acc"] = int(placeDf["Num_Acc"])
if not pd.isna(placeDf["catr"]):
place["catr"] = int(placeDf["catr"])
if not pd.isna(placeDf["voie"]):
place["voie"] = placeDf["voie"]
#if not pd.isna(placeDf["v1"]):
# place["v1"] = int(placeDf["v1"])
if str(placeDf["circ"]) not in ["0", "0.0", "nan"]:
place["circ"] = int(placeDf["circ"])
if not pd.isna(placeDf["nbv"]):
place["nbv"] = int(placeDf["nbv"])
#if not pd.isna(placeDf["pr"]):
# place["pr"] = float(placeDf["pr"])
#if not pd.isna(placeDf["pr1"]):
# place["pr1"] = int(placeDf["pr1"])
#if str(placeDf["vosp"]) not in ["0", "0.0", "nan"]:
# place["vosp"] = int(placeDf["vosp"])
#if not pd.isna(placeDf["lartpc"]):
# place["lartpc"] = int(placeDf["lartpc"])
#if not pd.isna(placeDf["larrout"]):
# place["larrout"] = int(placeDf["larrout"])
if str(placeDf["infra"]) not in ["0", "0.0", "nan"]:
place["infra"] = int(placeDf["infra"])
if str(placeDf["situ"]) not in ["0", "0.0", "nan"]:
place["situ"] = int(placeDf["situ"])
#if not pd.isna(placeDf["env1"]):
# place["env1"] = int(placeDf["env1"])
condition = {}
if str(placeDf["prof"]) not in ["0", "0.0", "nan"]:
condition["prof"] = int(placeDf["prof"])
if str(placeDf["plan"]) not in ["0", "0.0", "nan"]:
condition["plan"] = int(placeDf["plan"])
if str(placeDf["surf"]) not in ["0", "0.0", "nan"]:
condition["surf"] = int(placeDf["surf"])
if condition:
place["condition"] = condition
return place
def getCharacteristic(dataFrame, holidaysMap, inseeMap, placesMap):
c = {}
c["Num_Acc"] = int(dataFrame["Num_Acc"])
hrmn = str(dataFrame["hrmn"]).zfill(4)
years = "20" + str(dataFrame["an"]).zfill(2)
hours = int(hrmn[0:-2])
minutes = int(hrmn[-2:])
date = datetime.strptime(years + '-' + str(dataFrame['mois']) + '-' + str(dataFrame["jour"]) + ' ' + str(hours) + ":" + str(minutes), '%Y-%m-%d %H:%M')
c["date"] = date
holiday = holidaysMap.get(date.strftime("%Y-%m-%d"))
if holiday is not None:
c["holiday"] = holiday
if not pd.isna(dataFrame["col"]):
c["col"] = int(dataFrame["col"])
if str(dataFrame["int"]) not in ['0', '0.0']:
c["int"] = int(dataFrame["int"])
condition = {}
condition["lum"] = int(dataFrame["lum"])
if not pd.isna(dataFrame["atm"]):
condition["atm"] = int(dataFrame["atm"])
c["condition"] = condition
# c["agg"] = int(dataFrame["agg"])
# c["adr"] = str(dataFrame["adr"])
location = None
insee_code = getInseeCode(str(dataFrame["dep"]), str(dataFrame["com"]))
if insee_code is not None:
location = inseeMap.get(insee_code)
#location = getLocation(str(int(dataFrame["dep"])), str(int(dataFrame["com"])))
if location is None:
location = {}
if str(dataFrame["gps"]) not in ['0', '0.0', '']:
location["gps"] = str(dataFrame["gps"])
#if str(dataFrame["lat"]) not in ['0', '', '0.0', 'nan']:
# location["lat"] = float(dataFrame["lat"] / 100000)
#if str(dataFrame["long"]) not in ['0', '0.0', '', 'nan']:
# location["long"] = float(dataFrame["long"] / 100000)
if location:
c["location"] = location
road = placesMap.get(str(dataFrame["Num_Acc"]))
if road is not None:
c["road"] = road
return c
def loadHolidays():
holidaysMap = {}
print("Started loading holidays")
if os.path.isfile(jsonHolidays):
with open(jsonHolidays) as infile:
holidaysMap = json.load(infile)
print("Holidays loaded from file")
else:
holidaysData = pd.read_csv(csvHolidays)
for _, rowHoliday in holidaysData.iterrows():
if holidaysMap.get(rowHoliday["ds"]) is None:
holidaysMap[rowHoliday["ds"]] = rowHoliday["holiday"]
with open(jsonHolidays, 'w') as outfile:
json.dump(holidaysMap, outfile)
print("Holidays loaded in memory and saved to file")
return holidaysMap
def loadPlaces():
placesMap = {}
print("Started loading places")
if os.path.isfile(jsonPlaces):
with open(jsonPlaces) as infile:
placesMap = json.load(infile)
print("Places loaded from file")
else:
placesData = pd.read_csv(csvPlaces)
for _, rowPlace in placesData.iterrows():
if placesMap.get(rowPlace["Num_Acc"]) is None:
placesMap[rowPlace["Num_Acc"]] = getPlace(rowPlace)
with open(jsonPlaces, 'w') as outfile:
json.dump(placesMap, outfile)
print("Places loaded in memory and saved to file")
return placesMap
def loadInsee():
inseeMap = {}
print("Started loading insee")
if os.path.isfile(jsonInsee):
with open(jsonInsee) as infile:
inseeMap = json.load(infile)
print("Insee loaded from file")
else:
inseePostCodeData = pd.read_csv(csvInsee, sep=";")
for _, rowInsee in inseePostCodeData.iterrows():
if inseeMap.get(rowInsee["INSEE_COM"]) is None:
inseeMap[rowInsee["INSEE_COM"]] = getInsee(rowInsee)
with open(jsonInsee, 'w') as outfile:
json.dump(inseeMap, outfile)
print("Insee loaded in memory and saved to file")
return inseeMap
def loadCharacteristics():
characteristicsMap = {}
print("Started loading characteristics")
if os.path.isfile(jsonCharacteristics):
with open(jsonCharacteristics) as infile:
characteristicsMap = json.load(infile, cls=DateTimeDecoder)
print("Characteristics loaded from file")
else:
characteristicsData = pd.read_csv(csvCharacteristics)
holidaysMap = loadHolidays()
inseeMap = loadInsee()
placesMap = loadPlaces()
for _, rowCharacteristic in characteristicsData.iterrows():
if characteristicsMap.get(rowCharacteristic["Num_Acc"]) is None:
characteristicsMap[rowCharacteristic["Num_Acc"]] = getCharacteristic(rowCharacteristic, holidaysMap, inseeMap, placesMap)
with open(jsonCharacteristics, 'w') as outfile:
json.dump(characteristicsMap, outfile, cls=DateTimeEncoder)
print("Characteristics loaded in memory and saved to file")
return characteristicsMap
| UTF-8 | Python | false | false | 8,063 | py | 18 | characteristicsLoader.py | 15 | 0.633759 | 0.621605 | 0 | 265 | 29.426415 | 152 |
Shmuma/pytorch_tests | 4,741,643,933,152 | a7eab207a9f84dd443e71b7bd85be44b8f3ddb46 | d18f2c659ab6290bca630aebf7da6e32b13fff17 | /char_rnn_classification/char_rnn/data.py | e76ad6bb529bbaf66b1daf89d1e77a4549f516da | [] | no_license | https://github.com/Shmuma/pytorch_tests | a30b47ef03e789261ad7063d1a6262295ab8369e | cded13e89559ad0e0b41ad8aad150469ac962dee | refs/heads/master | 2022-12-24T00:40:27.564634 | 2017-10-12T06:22:39 | 2017-10-12T06:22:39 | 96,669,907 | 0 | 0 | null | false | 2022-12-08T00:00:35 | 2017-07-09T08:04:52 | 2017-07-29T14:57:41 | 2022-12-08T00:00:35 | 10,804 | 0 | 0 | 6 | Python | false | false |
import glob
import random
import os.path
import unicodedata
import string
ALL_ASCII_LETTERS = string.ascii_letters + " .,;'"
def all_filenames(data_dir="data"):
return glob.glob(os.path.join(data_dir, "*.txt"))
def read_file(file_path):
"""
Read data from file
:param file_path:
:return:
"""
with open(file_path, "rt", encoding='utf-8') as fd:
return list(map(str.rstrip, fd.readlines()))
def name_to_ascii(name):
"""
Convert unicode name to ASCII equivalent, method taken from http://stackoverflow.com/a/518232/2809427
:param name:
:return: normalized name string
"""
assert isinstance(name, str)
return ''.join(
c for c in unicodedata.normalize('NFD', name)
if unicodedata.category(c) != 'Mn'
and c in ALL_ASCII_LETTERS
)
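# Worked example (added): name_to_ascii('Ślusàrski') -> 'Slusarski'. NFD splits each
# accented letter into a base character plus a combining mark (category 'Mn'),
# and the combining marks are then dropped.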
def read_files(files, normalize_names=True):
"""
Read train data from given files list
:param files: list of files, one file per class
:param normalize_names: if True, read names will be normalized to ASCII
:return: dict with class -> [lines] data
"""
assert isinstance(files, list)
result = {}
for path in files:
class_name = os.path.splitext(os.path.basename(path))[0]
lines = read_file(path)
if normalize_names:
lines = list(map(name_to_ascii, lines))
result[class_name] = lines
return result
def read_data(normalize_names=True):
return read_files(all_filenames(), normalize_names=normalize_names)
def prepare_train_test(class_names, data_dict, test_ratio):
"""
Convert data dictionary in form class -> [samples] to train/test list of (name, class_idx) samples
:param class_names: list of class names
:param data_dict: data dict
    :param test_ratio: float ratio used to split the data into train/test
:return: tuple of lists
"""
assert isinstance(class_names, list)
assert isinstance(data_dict, dict)
assert isinstance(test_ratio, float)
class_map = {name: idx for idx, name in enumerate(class_names)}
data = [
(name, class_map[class_name])
for class_name in class_names
for name in data_dict[class_name]
]
random.shuffle(data)
test_len = int(len(data) * test_ratio)
return data[:-test_len], data[-test_len:]
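# (Added usage sketch) Assumes a data/ directory with one <Language>.txt file
# per class, which is what read_data() expects.
if __name__ == "__main__":
    data = read_data()
    classes = sorted(data.keys())
    train, test = prepare_train_test(classes, data, test_ratio=0.2)
    print("classes=%d train=%d test=%d" % (len(classes), len(train), len(test)))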
|
UTF-8
|
Python
| false | false | 2,316 |
py
| 46 |
data.py
| 29 | 0.645078 | 0.638601 | 0 | 85 | 26.247059 | 105 |
Mostofa-Najmus-Sakib/Applied-Algorithm
| 8,220,567,404,626 |
49f99de6eb6adde3c196309377effc9845452c21
|
f8e8e365c9cf58b61d72655bc2340baeaed5baff
|
/Leetcode/Python Solutions/Binary Trees/MaximumAverageSubtree.py
|
da1412cd36971bbda362196b84de9892df1bf4d8
|
[
"MIT"
] |
permissive
|
https://github.com/Mostofa-Najmus-Sakib/Applied-Algorithm
|
39a69f6b9ed113efe4a420d19cad79e0aa317637
|
bc656fd655617407856e0ce45b68585fa81c5035
|
refs/heads/master
| 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 |
MIT
| true | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | 2021-05-16T07:10:36 | 2021-05-16T07:10:34 | 840 | 0 | 0 | 0 | null | false | false |
"""
LeetCode Problem: 1120. Maximum Average Subtree
Link: https://leetcode.com/problems/maximum-average-subtree/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(n)
Space Complexity: O(n)
"""
# Definition for a binary tree node (LeetCode normally provides this; it is
# defined here so the file also runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def maximumAverageSubtree(self, root: TreeNode) -> float:
def depthFirstSearch(node):
# If we encounter a null node
if not node:
return [0, 0, 0] # currentSum, numberOfNodes, maxAverage
# Get the maximum average possible for the left & right subtree
leftSubTree = depthFirstSearch(node.left)
rightSubTree = depthFirstSearch(node.right)
# Calculate the currentSum, numberOfNodes, and maxAverage
currentSum = leftSubTree[0] + rightSubTree[0] + node.val
numberOfNodes = leftSubTree[1] + rightSubTree[1] + 1
maxAverage = max(leftSubTree[2], rightSubTree[2], currentSum/numberOfNodes)
# Return the currentSum, numberOfNodes, and maxAverage for each subtree
return [currentSum, numberOfNodes, maxAverage]
return depthFirstSearch(root)[2]
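# --- Local check (added): a tiny tree so the solution can run outside the
# LeetCode judge; the expected answer is 6.0 (the single-node subtree {6}).
if __name__ == "__main__":
    root = TreeNode(5, TreeNode(6), TreeNode(1))
    print(Solution().maximumAverageSubtree(root))  # 6.0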
|
UTF-8
|
Python
| false | false | 1,413 |
py
| 503 |
MaximumAverageSubtree.py
| 500 | 0.610757 | 0.599434 | 0 | 40 | 34.35 | 87 |
shevchenkodim/django2_1
| 3,410,204,071,404 |
8df20cdfe614925a4b867d940f6935d282ec7d81
|
ede51ecf2641440b04b264d2be02b7e651fa5c68
|
/vikupauto/urls.py
|
b9faa89213febbc091ae23faa5ddd3d1ae6627e3
|
[] |
no_license
|
https://github.com/shevchenkodim/django2_1
|
8764308b2f475a2fe7ee7483d1e92b58f3141fac
|
5576af559e41aaa3f1a0b58365d8efc46f47db26
|
refs/heads/master
| 2020-09-21T06:26:57.711599 | 2019-11-30T21:37:50 | 2019-11-30T21:37:50 | 224,709,388 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('feedback/', include('feedback.urls')),
path('', include('service.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
UTF-8
|
Python
| false | false | 552 |
py
| 3 |
urls.py
| 3 | 0.751812 | 0.751812 | 0 | 16 | 33.5 | 82 |
addy999/HemDetect
| 10,883,447,151,641 |
f06f7afc42667fd4d446396e646388a0bbf06674
|
d5f5bf59d3e03cecb2c826a774faae7c8e9f85e3
|
/Training/Scripts/tl.py
|
ed282e9f0f1effc9526a2cf7cf66a9a2cb78f326
|
[] |
no_license
|
https://github.com/addy999/HemDetect
|
84fb2a7e0bd41c9e9485a0a0c4021a3c0ccfe034
|
77bbe2758fb8c02ae2dfa13619d3659357a06724
|
refs/heads/master
| 2021-06-28T13:42:41.749945 | 2021-01-29T18:52:57 | 2021-01-29T18:52:57 | 212,179,860 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch.nn as nn
import torchvision.models
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torchvision import datasets
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import gc
import os
# alexnet_model_1: pretrained AlexNet adapted to single-channel (grayscale)
# input by swapping the first conv layer; all weights frozen below
alexnet_model_1 = torchvision.models.alexnet(pretrained=True)
alexnet_model_1.features[0] = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3)
alexnet_model_1.cuda()
for param in alexnet_model_1.parameters():
param.requires_grad = False
# alexnet_model_3: pretrained AlexNet kept with its standard 3-channel input; weights frozen
alexnet_model_3 = torchvision.models.alexnet(pretrained=True).cuda()
for param in alexnet_model_3.parameters():
param.requires_grad = False
# Resnet detector
resnet152_3 = torchvision.models.resnet152(pretrained=True).cuda()
# remove last FC layer
resnet152_3 = torch.nn.Sequential(*(list(resnet152_3.children())[:-1]))
for param in resnet152_3.parameters():
param.requires_grad = False
|
UTF-8
|
Python
| false | false | 983 |
py
| 112 |
tl.py
| 24 | 0.771109 | 0.734486 | 0 | 34 | 27.911765 | 85 |
nikkygleeson/secrets
| 9,156,870,314,818 |
5c1930467ba646556bd05ae84898ddaca079f08c
|
002956c05f53e5762c86f2f9e4710e8b1774a98d
|
/src/secrets/secrets.py
|
aa8abd00f80278aa58995745920e819889060be9
|
[
"MIT"
] |
permissive
|
https://github.com/nikkygleeson/secrets
|
956af2123f60a659c25b3a3c4733605ab904ad05
|
c70623a6bf724f286cfd2401bb79e7ae5250f150
|
refs/heads/master
| 2023-04-29T03:31:16.619543 | 2021-05-25T02:38:21 | 2021-05-25T02:38:21 | 361,356,661 | 0 | 1 | null | false | 2021-05-05T02:46:05 | 2021-04-25T07:08:50 | 2021-04-25T07:15:32 | 2021-05-05T02:46:04 | 8 | 0 | 1 | 0 | null | false | false |
def hex_cipher_encryptor(message: str) -> str:
"""Encrypts a given message using a Hex Cipher (converts characters to their hexadecimal equivalents).
Args:
message (str): Message to be encrypted.
Returns:
encrypted_message (str): Encrypted message.
"""
encrypted_message = "".join([hex(ord(char)) for char in message])
return encrypted_message
def hex_cipher_decryptor(encrypted_message: str) -> str:
"""Decrypts a message which has been encrypted using a Hex Cipher.
Args:
encrypted_message (str): Message to be decrypted.
Returns:
decrypted_message (str): Decrypted message.
"""
encrypted_characters = [encrypted_message[i : i + 4] for i in range(0, len(encrypted_message), 4)]
decrypted_message = "".join([chr(int(char, 16)) for char in encrypted_characters])
return decrypted_message
def atbash_cipher_encryptor_decryptor(message: str) -> str:
"""Encrypts and decrypts a given message using an AtBash Cipher (substitutes a letter of the plaintext
alphabet with the corresponding letter of the reversed alphabet).
a <-> z
b <-> y
c <-> x
Args:
message (str): Message to be encrypted or decrypted.
Returns:
encrypted_decrypted_message (str): Encrypted or decrypted message.
"""
N = ord("z") + ord("a")
encrypted_decrypted_message = "".join([chr(N - ord(char)) for char in message])
return encrypted_decrypted_message
def caesar_cipher_encryptor(key: int, message: str) -> str:
"""Encrypts a given message using a Caesar Cipher (substitutes a letter of the plaintext alphabet with a
letter located some fixed number of letters down the alphabet).
If key = 3:
a -> d
b -> e
z -> c
Args:
message (str): Message to be encrypted.
key (int): Shift.
Returns:
encrypted_message (str): Encrypted message.
"""
encrypted_message = "".join([chr(((ord(char) - ord("a") + key) % 26) + ord("a")) for char in message])
return encrypted_message
def caesar_cipher_decryptor(key: int, encrypted_message: str) -> str:
"""Decrypts a message which has been encrypted using a Caesar Cipher.
Args:
encrypted_message (str): Message to be decrypted.
key (int): Original shift.
Returns:
decrypted_message (str): Decrypted message.
"""
decrypted_message = "".join([chr(((ord(char) - ord("a") - key) % 26) + ord("a")) for char in encrypted_message])
return decrypted_message
def keyword_cipher_encryptor(key: str, message: str) -> str:
"""Encrypts a given message using a Keyword Cipher (replaces a consecutive set of letters in the alphabet
(from the beginning) with a chosen keyword with any repeated letters removed. All other characters which
do not appear in the keyword are placed directly after the keyword in alphabetical order to form the
    cipher alphabet).
Args:
message (str): Message to be encrypted.
key (str): Keyword.
Returns:
encrypted_message (str): Encrypted message.
"""
# Remove duplicate characters in key
key = "".join(dict.fromkeys(key))
# Create string of all lowercase characters
chars = "".join([chr(char) for char in range(97, 123)])
# Create cipher key
for char in chars:
if char not in key:
key += char
index_values = [chars.index(char) for char in message]
encrypted_message = "".join(key[index] for index in index_values)
return encrypted_message
def keyword_cipher_decryptor(key: str, encrypted_message: str) -> str:
"""Decrypts a message which has been encrypted using a Keyword Cipher.
Args:
encrypted_message (str): Message to be decrypted.
key (str): Keyword.
Returns:
decrypted_message (str): Decrypted message.
"""
# Remove duplicate characters in key
key = "".join(dict.fromkeys(key))
# Create string of lowercase characters
chars = "".join([chr(char) for char in range(97, 123)])
# Create cipher key
for char in chars:
if char not in key:
key += char
index_values = [key.index(char) for char in encrypted_message]
decrypted_message = "".join(chars[index] for index in index_values)
return decrypted_message
def vigenere_cipher_encryptor(key: str, message: str) -> str:
"""Encrypts a given message using a Vigenere Cipher (a more complicated cipher involving a keyword, in
    which the letter substitution of a plaintext letter is not always the same).
Args:
message (str): Message to be encrypted.
key (str): Keyword.
Returns:
encrypted_message (str): Encrypted message.
"""
key = list(key)
if len(message) == len(key):
full_key = key
else:
for i in range(len(message) - len(key)):
key.append(key[i % len(key)])
full_key = "".join(key)
encrypted_message = []
for char in range(len(message)):
x = (ord(message[char]) - 2 * ord("a") + ord(full_key[char])) % 26
x += ord("a")
encrypted_message.append(chr(x))
encrypted_message = "".join(encrypted_message)
return encrypted_message
def vigenere_cipher_decryptor(key: str, encrypted_message: str) -> str:
"""Decrypts a message which has been encrypted using a Vigenere Cipher.
Args:
encrypted_message (str): Message to be decrypted.
        key (str): Keyword.
Returns:
decrypted_message (str): Decrypted message.
"""
key = list(key)
if len(encrypted_message) == len(key):
full_key = key
else:
for i in range(len(encrypted_message) - len(key)):
key.append(key[i % len(key)])
full_key = "".join(key)
decrypted_message = []
for char in range(len(encrypted_message)):
x = (ord(encrypted_message[char]) - ord(full_key[char])) % 26
x += ord("a")
decrypted_message.append(chr(x))
decrypted_message = "".join(decrypted_message)
return decrypted_message
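# --- Usage sketch (added example): round-trips for the ciphers above.
# Assumes lowercase ASCII input with no spaces, which is what these helpers
# expect; the sample message and keys are illustrative.
if __name__ == "__main__":
    msg = "attackatdawn"
    assert hex_cipher_decryptor(hex_cipher_encryptor(msg)) == msg
    assert atbash_cipher_encryptor_decryptor(atbash_cipher_encryptor_decryptor(msg)) == msg
    assert caesar_cipher_decryptor(3, caesar_cipher_encryptor(3, msg)) == msg
    assert keyword_cipher_decryptor("kryptos", keyword_cipher_encryptor("kryptos", msg)) == msg
    assert vigenere_cipher_decryptor("lemon", vigenere_cipher_encryptor("lemon", msg)) == msg
    print("All cipher round-trips succeeded")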
|
UTF-8
|
Python
| false | false | 6,076 |
py
| 5 |
secrets.py
| 4 | 0.637591 | 0.633476 | 0 | 197 | 29.84264 | 116 |
sw-Jack/Python
| 16,561,393,922,169 |
764bd8023c1d51394841cabf1422a4c32555194d
|
2fae7d7ba6b2a4590fd4d90b1a491e919ec744de
|
/myPy/day05/day05-exam04-이분탐색with알고리즘&재귀호출.py
|
1ff0a0ea2d2b513b60e154044a4f63f0e09f8cbc
|
[] |
no_license
|
https://github.com/sw-Jack/Python
|
0952ded606f79c07c77a021b075e09b1e95caca4
|
c139301bf3a98f9b5289807ca8141febb947603b
|
refs/heads/master
| 2023-02-02T11:06:18.264799 | 2020-12-21T13:11:32 | 2020-12-21T13:11:32 | 290,997,124 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Binary search
# Assumes the data is already sorted.
# Returns the position of the value if it is present, otherwise -1
# Algorithm
# (1) Find the middle position
# (2) Compare the target value with the value at the middle position.
# (3) If they are equal, return that position
# (4) If the target is greater than the middle value, search the right half again.
# (5) If the target is smaller than the middle value, search the left half again.
# Code
def binary_search(lst,x) :
start = 0
end = len(lst) - 1
while start <= end :
mid = (start + end) // 2
if x == lst[mid] :
return mid
elif x > lst[mid] :
start = mid + 1
else :
end = mid - 1
return -1
data = [1,4,9,16,25,36,49,64,81]
print(binary_search(data,36))
print(binary_search(data,4))
print(binary_search(data,90))
# Quiz: implement binary search using recursion
# (1) If the given search range is empty, there is nothing to search. (terminate)
# (2) Compare the target value with the middle value.
# (3) If they are equal, return that position
# (4) If the target is greater than the middle value, recurse on the right half.
# (5) If the target is smaller than the middle value, recurse on the left half.
# Code
def binary_search_sub(a,x,start,end) :
if start > end :
return -1
mid = (start + end) // 2
if x == a[mid] :
return mid
elif x > a[mid] :
return binary_search_sub(a,x,mid+1,end)
else :
return binary_search_sub(a,x,start,mid-1)
def binary_search(a,x) :
return binary_search_sub(a,x,0,len(a)-1)
data = [1,4,9,16,25,36,49,64,81]
print(binary_search(data,36))
print(binary_search(data,4))
print(binary_search(data,90))
|
UTF-8
|
Python
| false | false | 2,011 |
py
| 70 |
day05-exam04-이분탐색with알고리즘&재귀호출.py
| 70 | 0.551844 | 0.507307 | 0 | 65 | 20.538462 | 59 |
hellais/ooni-probe
| 14,482,629,722,813 |
2786191e82d8d3071cde9a36d997e530868041b5
|
b156f49432b11047f5aa390dc93de1a9920ebf30
|
/ooni/director.py
|
21ac9fc252de51b52f1d8fe3aca6587d61ce72fd
|
[
"BSD-2-Clause-Views"
] |
permissive
|
https://github.com/hellais/ooni-probe
|
8b2fad5aa7eb1eda6a089f75164ca18ec80e7535
|
2764cc2bca9fc60f978b3361ab2e8de372bc5af6
|
refs/heads/master
| 2021-01-16T17:42:56.589164 | 2014-05-27T22:10:46 | 2014-05-27T22:10:46 | 3,614,533 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from ooni.managers import ReportEntryManager, MeasurementManager
from ooni.reporter import Report
from ooni.utils import log, pushFilenameStack
from ooni.utils.net import randomFreePort
from ooni.nettest import NetTest, getNetTestInformation
from ooni.settings import config
from ooni import errors
from txtorcon import TorConfig, TorState, launch_tor, build_tor_connection
from twisted.internet import defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
class Director(object):
"""
Singleton object responsible for coordinating the Measurements Manager and the
Reporting Manager.
    How this all looks is as follows:
+------------------------------------------------+
| Director |<--+
+------------------------------------------------+ |
^ ^ |
| Measurement | |
+---------+ [---------] +--------------------+ |
| | | MeasurementManager | |
| NetTest | [---------] +--------------------+ |
| | | [----------------] | |
+---------+ [---------] | [----------------] | |
| | [----------------] | |
| +--------------------+ |
v |
+---------+ ReportEntry |
| | [---------] +--------------------+ |
| Report | | ReportEntryManager | |
| | [---------] +--------------------+ |
+---------+ | [----------------] | |
[---------] | [----------------] |--
| [----------------] |
+--------------------+
[------------] are Tasks
+------+
| | are TaskManagers
+------+
| |
+------+
+------+
| | are general purpose objects
+------+
"""
_scheduledTests = 0
# Only list NetTests belonging to these categories
categories = ['blocking', 'manipulation']
def __init__(self):
self.activeNetTests = []
self.measurementManager = MeasurementManager()
self.measurementManager.director = self
self.reportEntryManager = ReportEntryManager()
self.reportEntryManager.director = self
# Link the TaskManager's by least available slots.
self.measurementManager.child = self.reportEntryManager
# Notify the parent when tasks complete # XXX deadlock!?
self.reportEntryManager.parent = self.measurementManager
self.successfulMeasurements = 0
self.failedMeasurements = 0
self.totalMeasurements = 0
# The cumulative runtime of all the measurements
self.totalMeasurementRuntime = 0
self.failures = []
self.torControlProtocol = None
# This deferred is fired once all the measurements and their reporting
# tasks are completed.
self.allTestsDone = defer.Deferred()
self.sniffer = None
def getNetTests(self):
nettests = {}
def is_nettest(filename):
return not filename == '__init__.py' \
and filename.endswith('.py')
for category in self.categories:
dirname = os.path.join(config.nettest_directory, category)
            # scan every filename in this category's directory
for filename in os.listdir(dirname):
if is_nettest(filename):
net_test_file = os.path.join(dirname, filename)
nettest = getNetTestInformation(net_test_file)
nettest['category'] = category.replace('/', '')
if nettest['id'] in nettests:
log.err("Found a two tests with the same name %s, %s" %
(net_test_file, nettests[nettest['id']]['path']))
else:
category = dirname.replace(config.nettest_directory, '')
nettests[nettest['id']] = nettest
return nettests
@defer.inlineCallbacks
def start(self, start_tor=False):
self.netTests = self.getNetTests()
if config.advanced.start_tor and start_tor:
yield self.startTor()
elif config.tor.control_port:
log.msg("Connecting to Tor Control Port...")
yield self.getTorState()
if config.global_options['no-geoip']:
aux = [False]
if config.global_options.get('annotations') is not None:
annotations = [k.lower() for k in config.global_options['annotations'].keys()]
aux = map(lambda x: x in annotations, ["city", "country", "asn"])
if not all(aux):
log.msg("You should add annotations for the country, city and ASN")
else:
yield config.probe_ip.lookup()
@property
def measurementSuccessRatio(self):
if self.totalMeasurements == 0:
return 0
return self.successfulMeasurements / self.totalMeasurements
@property
def measurementFailureRatio(self):
if self.totalMeasurements == 0:
return 0
return self.failedMeasurements / self.totalMeasurements
@property
def measurementSuccessRate(self):
"""
The speed at which tests are succeeding globally.
This means that fast tests that perform a lot of measurements will
impact this value quite heavily.
"""
if self.totalMeasurementRuntime == 0:
return 0
return self.successfulMeasurements / self.totalMeasurementRuntime
@property
def measurementFailureRate(self):
"""
The speed at which tests are failing globally.
"""
if self.totalMeasurementRuntime == 0:
return 0
return self.failedMeasurements / self.totalMeasurementRuntime
def measurementTimedOut(self, measurement):
"""
        This gets called every time a measurement times out, independently of
        whether it gets re-scheduled or not.
"""
pass
def measurementStarted(self, measurement):
self.totalMeasurements += 1
def measurementSucceeded(self, result, measurement):
log.debug("Successfully completed measurement: %s" % measurement)
self.totalMeasurementRuntime += measurement.runtime
self.successfulMeasurements += 1
measurement.result = result
return measurement
def measurementFailed(self, failure, measurement):
log.msg("Failed doing measurement: %s" % measurement)
self.totalMeasurementRuntime += measurement.runtime
self.failedMeasurements += 1
self.failures.append((failure, measurement))
measurement.result = failure
return measurement
def reporterFailed(self, failure, net_test):
"""
        This gets called every time a reporter fails and has been removed
        from the reporters of a NetTest.
        Once a report has failed to be created, that net_test will never use
        the reporter again.
XXX hook some logic here.
note: failure contains an extra attribute called failure.reporter
"""
pass
def netTestDone(self, net_test):
self.activeNetTests.remove(net_test)
if len(self.activeNetTests) == 0:
self.allTestsDone.callback(None)
@defer.inlineCallbacks
def startNetTest(self, net_test_loader, reporters):
"""
Create the Report for the NetTest and start the report NetTest.
Args:
net_test_loader:
an instance of :class:ooni.nettest.NetTestLoader
"""
if self.allTestsDone.called:
self.allTestsDone = defer.Deferred()
if config.privacy.includepcap:
if not config.reports.pcap:
config.reports.pcap = config.generate_pcap_filename(net_test_loader.testDetails)
self.startSniffing()
report = Report(reporters, self.reportEntryManager)
net_test = NetTest(net_test_loader, report)
net_test.director = self
yield net_test.report.open()
yield net_test.initializeInputProcessor()
try:
self.activeNetTests.append(net_test)
self.measurementManager.schedule(net_test.generateMeasurements())
yield net_test.done
yield report.close()
finally:
self.netTestDone(net_test)
def startSniffing(self):
""" Start sniffing with Scapy. Exits if required privileges (root) are not
available.
"""
from ooni.utils.txscapy import ScapyFactory, ScapySniffer
config.scapyFactory = ScapyFactory(config.advanced.interface)
if os.path.exists(config.reports.pcap):
log.msg("Report PCAP already exists with filename %s" % config.reports.pcap)
log.msg("Renaming files with such name...")
pushFilenameStack(config.reports.pcap)
if self.sniffer:
config.scapyFactory.unRegisterProtocol(self.sniffer)
self.sniffer = ScapySniffer(config.reports.pcap)
config.scapyFactory.registerProtocol(self.sniffer)
log.msg("Starting packet capture to: %s" % config.reports.pcap)
@defer.inlineCallbacks
def getTorState(self):
connection = TCP4ClientEndpoint(reactor, '127.0.0.1',
config.tor.control_port)
config.tor_state = yield build_tor_connection(connection)
def startTor(self):
""" Starts Tor
        Launches Tor using the socks_port, control_port and tor_binary
        options set in ooniprobe.conf
"""
log.msg("Starting Tor...")
@defer.inlineCallbacks
def state_complete(state):
config.tor_state = state
log.msg("Successfully bootstrapped Tor")
log.debug("We now have the following circuits: ")
for circuit in state.circuits.values():
log.debug(" * %s" % circuit)
socks_port = yield state.protocol.get_conf("SocksPort")
control_port = yield state.protocol.get_conf("ControlPort")
config.tor.socks_port = int(socks_port.values()[0])
config.tor.control_port = int(control_port.values()[0])
def setup_failed(failure):
log.exception(failure)
raise errors.UnableToStartTor
def setup_complete(proto):
"""
Called when we read from stdout that Tor has reached 100%.
"""
log.debug("Building a TorState")
config.tor.protocol = proto
state = TorState(proto.tor_protocol)
state.post_bootstrap.addCallback(state_complete)
state.post_bootstrap.addErrback(setup_failed)
return state.post_bootstrap
def updates(prog, tag, summary):
log.msg("%d%%: %s" % (prog, summary))
tor_config = TorConfig()
if config.tor.control_port:
tor_config.ControlPort = config.tor.control_port
if config.tor.socks_port:
tor_config.SocksPort = config.tor.socks_port
if config.tor.data_dir:
data_dir = os.path.expanduser(config.tor.data_dir)
if not os.path.exists(data_dir):
log.msg("%s does not exist. Creating it." % data_dir)
os.makedirs(data_dir)
tor_config.DataDirectory = data_dir
if config.tor.bridges:
tor_config.UseBridges = 1
if config.advanced.obfsproxy_binary:
tor_config.ClientTransportPlugin = \
'obfs2,obfs3 exec %s managed' % \
config.advanced.obfsproxy_binary
bridges = []
with open(config.tor.bridges) as f:
for bridge in f:
if 'obfs' in bridge:
if config.advanced.obfsproxy_binary:
bridges.append(bridge.strip())
else:
bridges.append(bridge.strip())
tor_config.Bridge = bridges
if config.tor.torrc:
for i in config.tor.torrc.keys():
setattr(tor_config, i, config.tor.torrc[i])
tor_config.save()
if not hasattr(tor_config,'ControlPort'):
control_port = int(randomFreePort())
tor_config.ControlPort = control_port
config.tor.control_port = control_port
if not hasattr(tor_config,'SocksPort'):
socks_port = int(randomFreePort())
tor_config.SocksPort = socks_port
config.tor.socks_port = socks_port
tor_config.save()
log.debug("Setting control port as %s" % tor_config.ControlPort)
log.debug("Setting SOCKS port as %s" % tor_config.SocksPort)
if config.advanced.tor_binary:
d = launch_tor(tor_config, reactor,
tor_binary=config.advanced.tor_binary,
progress_updates=updates)
else:
d = launch_tor(tor_config, reactor,
progress_updates=updates)
d.addCallback(setup_complete)
d.addErrback(setup_failed)
return d
|
UTF-8
|
Python
| false | false | 13,480 |
py
| 103 |
director.py
| 73 | 0.555341 | 0.552893 | 0 | 373 | 35.13941 | 96 |
juanmacugat/google-ads-python
| 17,497,696,782,872 |
a30f9450589891dbc20625023f374c3d090a90b3
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/examples/billing/get_billing_setup.py
|
34f1109f8fe2deb1285c7be7ff9f6402d1762502
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
https://github.com/juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 |
Apache-2.0
| true | 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | 2020-03-05T16:14:00 | 2020-03-05T16:13:58 | 3,865 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets all billing setup objects available for the specified customer ID."""
import argparse
import sys
import google.ads.google_ads.client
_DEFAULT_PAGE_SIZE = 1000
def main(client, customer_id, page_size):
ga_service = client.get_service('GoogleAdsService', version='v2')
query = (
'SELECT billing_setup.id, billing_setup.status, '
'billing_setup.payments_account, '
'billing_setup.payments_account_info.payments_account_id, '
'billing_setup.payments_account_info.payments_account_name, '
'billing_setup.payments_account_info.payments_profile_id, '
'billing_setup.payments_account_info.payments_profile_name, '
'billing_setup.payments_account_info.secondary_payments_profile_id '
'FROM billing_setup')
results = ga_service.search(customer_id, query=query, page_size=page_size)
try:
# Use the enum type to determine the enum name from the value.
billing_setup_status_enum = (
client.get_type('BillingSetupStatusEnum', version='v2')
.BillingSetupStatus)
print('Found the following billing setup results:')
for row in results:
billing_setup = row.billing_setup
payments_account_info = billing_setup.payments_account_info
print('Billing setup with ID "%s", status "%s", '
'payments_account "%s", payments_account_id "%s", '
'payments_account_name "%s", payments_profile_id "%s", '
'payments_profile_name "%s", '
'secondary_payments_profile_id "%s".'
% (billing_setup.id.value,
billing_setup_status_enum.Name(billing_setup.status),
billing_setup.payments_account.value,
payments_account_info.payments_account_id.value,
payments_account_info.payments_account_name.value,
payments_account_info.payments_profile_id.value,
payments_account_info.payments_profile_name.value,
payments_account_info.secondary_payments_profile_id.value))
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage())
parser = argparse.ArgumentParser(
description='Lists all billing setup objects for specified customer.')
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=str,
required=True, help='The Google Ads customer ID.')
args = parser.parse_args()
main(google_ads_client, args.customer_id, _DEFAULT_PAGE_SIZE)
|
UTF-8
|
Python
| false | false | 3,966 |
py
| 379 |
get_billing_setup.py
| 375 | 0.645487 | 0.641704 | 0 | 89 | 43.561798 | 80 |
hess8/pythonscripts
| 6,597,069,816,546 |
62bd819f6c05c400e9283977f4cfe30738e1b3fa
|
e6cc536dbb43a4dc0da4c21f76b5b61058f5e53e
|
/cluster_expansion/ceflashscripts/submitVasp.py
|
49049ee5a0b4532ed0472ff1738a7728d6ec93a4
|
[] |
no_license
|
https://github.com/hess8/pythonscripts
|
2f8df626f0c12baf205845ba41654b6cd59280b2
|
a048b60c96254576f32ec67164cc8ed64cca2486
|
refs/heads/master
| 2021-01-18T20:25:11.141331 | 2019-03-29T03:27:48 | 2019-03-29T03:27:48 | 6,807,456 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import time, os, subprocess
'''For each dir in jobs2run: copies POSCAR.orig to POSCAR, replaces the kpoints file with the correct mesh for POSCAR,
reads a jobfile from the maindir, writes the structure number to the job name, and submits a vasp job
'''
import numpy as np
#from kmeshroutines.py import *
################# script #######################
#maindir = '/fslhome/bch/cluster_expansion/alir/AFLOWDATAf1_50/AlIr/'
maindir = '/fslhome/bch/cluster_expansion/alir/AFLOWDATAf1_50e/test2/f5179/'
testfile = 'POSCAR'
reallatt = np.zeros((3,3))
os.chdir(maindir)
dirs= sorted([d for d in os.listdir(os.getcwd()) if os.path.isdir(d)])
for dir in dirs:
if testfile in os.listdir(dir):
currdir = maindir + dir+'/'
print dir #+ "************"
file1 = open(maindir+dir+'/'+testfile,'r')
poscar = file1.readlines()
file1.close()
if len(poscar) >0:
os.chdir(currdir)
# scale = np.sum(np.array(float(poscar[1])))
# N = np.rint(Nkppra/np.sum(np.array(poscar[5].split(),dtype=np.int16))).astype(int) # number of kpts desired
# reallatt[0,:] = np.array(poscar[2].split())
# reallatt[1,:] = np.array(poscar[3].split())
# reallatt[2,:] = np.array(poscar[4].split())
# reallatt = scale*reallatt.astype(np.float)
# reciplatt = 2*np.pi*np.transpose(np.linalg.inv(reallatt))
            os.system('rm slurm*')
            # subprocess.call does not expand shell wildcards, so CHG* must
            # also be removed through the shell
            os.system('rm CHG*')
            subprocess.call(['rm', 'OUTCAR'])
            subprocess.call(['sbatch', 'vaspjob'])
os.chdir(maindir)
# submit vasp job
print 'Done'
|
UTF-8
|
Python
| false | false | 1,822 |
py
| 181 |
submitVasp.py
| 160 | 0.569704 | 0.553787 | 0 | 48 | 36.895833 | 120 |
Lusenaka/SENDIT_COURIER
| 7,533,372,657,663 |
119a72541a0e41d958600b3f4953f1784531ce06
|
df1735507951f72f147a82b882996dd59d90da30
|
/app/api/v1/models.py
|
9ad9ada176a66aafee2cb01cfb77059458d07d05
|
[
"MIT"
] |
permissive
|
https://github.com/Lusenaka/SENDIT_COURIER
|
bab6f63a14635f8fd227400d2c817ab8f9ec9238
|
b87fcd6917751bacd82fbf103bfbf2f7b578601a
|
refs/heads/ft-correctionsbranch-161962988
| 2022-12-11T04:37:20.608098 | 2018-11-15T14:07:52 | 2018-11-15T14:07:52 | 157,015,049 | 0 | 0 |
MIT
| false | 2022-05-25T01:53:56 | 2018-11-10T19:39:00 | 2018-11-15T14:08:27 | 2022-05-25T01:53:56 | 22 | 0 | 0 | 9 |
Python
| false | false |
from flask import make_response, jsonify, request
from flask_restful import reqparse
users = [
{
"user_id" : 23,
"username" : "Alvin",
"email": "Alvin@gmail.com",
"default_location":"Kiambu",
"password":1234
},
{
"user_id" : 45,
"username" : "Charity",
"email": "Charity@gmail.com",
"default_location":"Kericho",
"password": 54321
}]
"""User roles"""
customer = "Normal user"
admin = "User administrator"
class Users(object):
"""Creating model for users"""
def __init__(self):
self.udb = users
self.user_id = len(self.udb)
self.role = customer
def create_user(self, username, email, default_location, password):
user = {
"user_id" :self.user_id + 1,
"username": username,
"email": email,
"default_location": default_location,
"password": password,
"role" : self.role
}
        # list.append returns None, so return the new record instead
        self.udb.append(user)
        return user
def filter_user_detail(self,email):
"""Using list comprehension to get specific user email"""
user = [user for user in users if user['email']==email]
return user
def filter_password_detail(self,password):
passw = [passw for passw in users if passw['password']==password]
return passw
    def user_login(self, email, password):
        registered_user = Users.filter_user_detail(self, email)
        if not registered_user:
            return make_response(jsonify({
                "message" : "{} is not a registered user".format(email)
            }), 400)
        registered_password = Users.filter_password_detail(self, password)
        if not registered_password:
            return make_response(jsonify({
                "message" : "wrong password for {}".format(email)
            }), 400)
        return make_response(jsonify({
            "message" : "login successful"
        }), 201)
    def user_role(self, user):
        """Describe the role of a given user record."""
        if user.get("role") == admin:
            return {'Role' : 'You are an admin'}
        return {'Role' : 'You are a customer'}
parcels = [
{
"order_id" : 56,
"current_location": "Nairobi",
"receiver_name": "Anne",
"receivers_location": "Mombasa",
"pickup_location": "Tuskeys",
"weight": 23,
"price": 3456,
"status": "pending"
},
{
"order_id" : 65,
"current_location": "Nairobi",
"receiver_name": "Moreen",
"receivers_location": "Kisumu",
"pickup_location": "Mama Ngina",
"weight": 23,
"price": 5443,
"status": "delivered"
}]
"""Order status after pickup"""
pending= "Your order is waiting to be sent"
on_transit= "in Transit"
delivered= "Delivered"
cancelled= "Cancelled"
class ParcelOrder(object):
"""Creating model for parcels"""
def __init__(self):
self.db = parcels
self.order_id = len(self.db)
self.status = pending
def new_parcel(self, current_location,receiver_name ,receivers_location, pickup_location, weight, price):
new_order_data = {
"order_id": self.order_id + 1,
"current_location": current_location,
"receiver_name":receiver_name,
"receivers_location":receivers_location,
"pickup_location": pickup_location,
"weight": weight,
"price": price,
"status": self.status
}
        # list.append returns None, so return the new record instead
        self.db.append(new_order_data)
        return new_order_data
def parcels_list(self):
return self.db
    def single_parcel(self, order_id):
        for parcel in parcels:
            if parcel["order_id"] == order_id:
                return parcel
        return {'parcel': 'Is nowhere to be found'}, 404
    def cancel_order(self, order_id):
        for parcel in parcels:
            if parcel['order_id'] == order_id:
                if parcel["status"] == delivered:
                    return {'parcel': "This parcel was already delivered and therefore cannot be canceled"}
                parcel.update({"status": cancelled})
                return {'parcel': 'Order Cancelled'}
        return {'parcel': 'Is nowhere to be found'}, 404
def clear(self):
self.db = []
    def get_orders_by_specific_user(self, receiver_name):
        """Return orders by a specific user."""
        user_orders = []
        # Iterate over all parcels and keep those addressed to the receiver
        for parcel in parcels:
            if parcel['receiver_name'] == receiver_name:
                user_orders.append(parcel)
        if user_orders:
            return user_orders
        return "Orders not found", 404
|
UTF-8
|
Python
| false | false | 4,863 |
py
| 14 |
models.py
| 11 | 0.549455 | 0.539379 | 0 | 157 | 29.974522 | 109 |
blacktyger/epicentral
| 2,276,332,683,365 |
d226c5de5b94cd12d21d6ec07a1599fb847bd7c6
|
ddfb5fb11498ed28c2e7bac4a32e7d965df3c842
|
/app/db.py
|
129ba435a041f0de16c3938d3da590564e2bfef0
|
[] |
no_license
|
https://github.com/blacktyger/epicentral
|
955e62d91132bfac197ce3fecbf9e76bd7aee99e
|
498e9168b6d6be5bf18b5b8d4f9b8d7dfe052e5f
|
refs/heads/master
| 2023-02-20T08:30:04.323734 | 2021-01-25T13:35:16 | 2021-01-25T13:35:16 | 332,756,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
from decimal import InvalidOperation, Decimal
from live_trader import citex_api as citex
import requests
from vitex_api_pkg import VitexRestApi
def d(value, places=8):
    try:
        return round(Decimal(value), places)
    except (InvalidOperation, ValueError):
        # the original `value == '' or ' ' or []` was always truthy;
        # test membership instead, and always return a Decimal
        if value in ('', ' ', []):
            print('Empty string')
        else:
            print('String should have numbers only')
        return Decimal(0)
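# (Added examples, illustrative only)
#   d('0.123456789')  -> Decimal('0.12345679')   # rounded to 8 places
#   d('')             -> Decimal('0')            # after printing 'Empty string'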
class DataBase:
def __init__(self):
print("--------------------- INITIALIZE DB ----------------")
self.btc_price = float(json.loads(requests.get("https://blockchain.info/ticker").content)['USD']['last'])
self.currency = self.currency_data()
self.epic = self.epic_data()
self.blocks = self.block_data()
self.epic_vs_usd = float(self.epic['usdt']['avg_price'])
self.epic_vs_btc = float(self.epic['btc']['avg_price'])
self.orderbook = self.get_orderbook()
def epicradar_api(self, query):
base = "https://epicradar.tech/api/"
url = f"{base}{query}"
return json.loads(requests.get(url).content)
def currency_data(self):
data = self.epicradar_api('currency')
output = {}
for x in data:
output[x['symbol']] = {'flag': x['flag'], 'country': x['country'],
'symbol': x['symbol'], 'price': x['price']}
return output
def epic_data(self):
data = self.epicradar_api('data')
output = {}
for x in data:
output[x['pair']] = x
return output
def block_data(self):
data = self.epicradar_api('block')
return data
def get_orderbook(self):
vitex_api = VitexRestApi()
vitex_epic_btc_orderbook = vitex_api.get_order_book_depth(symbol="EPIC-001_BTC-000")
citex_epic_btc_orderbook = citex.get_order_book('EPIC-BTC')
citex_epic_usdt_orderbook = citex.get_order_book('EPIC-USDT')
self.orderbook = {
'vitex': {
'btc': {
'bids': vitex_epic_btc_orderbook['data']['asks'],
'asks': vitex_epic_btc_orderbook['data']['bids']
}},
'citex': {
'btc': {
'bids': [[x['price'], x['quantity']] for x in citex_epic_btc_orderbook['data']['asks']],
'asks': [[x['price'], x['quantity']] for x in citex_epic_btc_orderbook['data']['bids']],
},
'usd': {
'bids': [[x['price'], x['quantity']] for x in citex_epic_usdt_orderbook['data']['asks']],
'asks': [[x['price'], x['quantity']] for x in citex_epic_usdt_orderbook['data']['bids']],
}}
}
return self.orderbook
db = DataBase()
|
UTF-8
|
Python
| false | false | 3,551 |
py
| 7 |
db.py
| 4 | 0.484934 | 0.482681 | 0 | 99 | 34.868687 | 113 |
Anjaan-g/learning-rest-framework
| 5,841,155,569,744 |
cfd49e68eb1433480373da71e398286612df3c86
|
c9cf349d854028b7f34b546dea3b3ffef8813713
|
/api_eg/api/urls.py
|
554dd68fe127592976d611b997eb494bb8c68dc4
|
[] |
no_license
|
https://github.com/Anjaan-g/learning-rest-framework
|
da4b7a5ae731513cd42d08c763618917ddbaf8ca
|
b2c619d0ab246c7afec9d29e695ad889e3d64e82
|
refs/heads/master
| 2020-05-26T21:58:21.729213 | 2019-05-25T08:48:21 | 2019-05-25T08:48:21 | 188,388,594 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('colleges', views.CollegeView)
router.register('jobs', views.JobView)
router.register('addresses', views.AddressView)
urlpatterns = [
path('', include(router.urls)),
]
|
UTF-8
|
Python
| false | false | 322 |
py
| 6 |
urls.py
| 6 | 0.748447 | 0.748447 | 0 | 14 | 22 | 47 |
vinaynv3/AlgorithmsDatastructures
| 5,291,399,717,580 |
40a98cfb140981a547428beaca84348fb9a3a23c
|
903981282e102869d8ec8fce99b2d9f176c2e922
|
/PgEx_1/circuit_connection.py
|
700ed01a37f54b3b44cfbafb09a1637c63c77a38
|
[] |
no_license
|
https://github.com/vinaynv3/AlgorithmsDatastructures
|
98faeb559144b6e6221efc64bdf06749a75bd1dc
|
b6a7d6db8230c86c5fddec593ba61989eadc72bb
|
refs/heads/master
| 2020-05-31T17:55:33.499985 | 2020-05-10T21:19:51 | 2020-05-10T21:19:51 | 190,420,671 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Connector wires the output of one logic gate into the input of another,
forming a circuit.
'''
#from logic_gates import Binary, Unary
class Connector:
#Constructor
def __init__(self, fromGate,toGate):
self.fromGate = fromGate
self.toGate = toGate
def execute(self):
result = self.fromGate.getOutput()
self.toGate.setNext(result)
final_result = self.toGate.getOutput()
return final_result
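# --- Example (added sketch): AndGate and NotGate below are hypothetical
# stand-ins for the Binary/Unary gates hinted at by the commented import
# above; they only illustrate how Connector chains two gates together.
class AndGate:
    def __init__(self, a, b):
        self.a, self.b = a, b
    def getOutput(self):
        return self.a and self.b
class NotGate:
    def __init__(self):
        self.next = None
    def setNext(self, value):
        self.next = value
    def getOutput(self):
        return not self.next
if __name__ == '__main__':
    nand = Connector(AndGate(1, 1), NotGate())
    print(nand.execute())  # False, i.e. NOT(1 AND 1)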
|
UTF-8
|
Python
| false | false | 416 |
py
| 14 |
circuit_connection.py
| 12 | 0.653846 | 0.653846 | 0 | 17 | 23.470588 | 51 |
MUKHERJEE3112/Letsupgrade-Python_b7
| 4,793,183,551,467 |
8a6957214fe8d2404ff9ab1d4ca0cb167c872689
|
beb4ea00895b368f7f77565aac59c5846f58b6cd
|
/DAY4.py
|
2d89b8aa37bcd307ed4a52536a8d3c003f334c6a
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/MUKHERJEE3112/Letsupgrade-Python_b7
|
93db84a5722e62702bafde6dd0dbf9c2fa9b26e3
|
9a60cada5fdda813d53956135acb975f19950191
|
refs/heads/master
| 2022-12-08T18:19:06.991025 | 2020-09-06T14:20:12 | 2020-09-06T14:20:12 | 293,288,262 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''WAP TO FIND THE ARMSTRONG NUMBER FROM 1042000 TO 702648265
AND EXIT THE LOOP AS SOON AS YOU ENCOUNTER THE FIRST ARMSTRONG NUMBER'''
#THE NUMBERS GIVEN BY THE USER ARE STORED IN start AND end
start = int(input('ENTER THE STARTING VALUE :'))
end = int(input('ENTER THE ENDING VALUE :'))
for num in range(start, end + 1):
    # An Armstrong number equals the sum of its digits, each raised to the
    # power of the digit count (cubes only work for 3-digit numbers)
    order = len(str(num))
    total, n = 0, num
    while n != 0:
        total += (n % 10) ** order
        n //= 10
    if num == total:
        print('THE NUMBER IS ARMSTRONG NUMBER', num)
        break
else:
    # for-else: runs only when the loop finishes without a break
    print('NO ARMSTRONG NUMBER')
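# (Added sanity check, illustrative) 153 = 1**3 + 5**3 + 3**3 and
# 1634 = 1**4 + 6**4 + 3**4 + 4**4, so both must be detected as Armstrong.
def is_armstrong(num):
    digits = str(num)
    return num == sum(int(d) ** len(digits) for d in digits)
assert is_armstrong(153) and is_armstrong(1634)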
|
UTF-8
|
Python
| false | false | 543 |
py
| 4 |
DAY4.py
| 3 | 0.615101 | 0.572744 | 0 | 16 | 31.5625 | 72 |
Angelpacman/Dataschool
| 18,339,510,354,916 |
0dd6947b3c112a9f2c2285e1a5479de8afca9bdf
|
85668736ab6034ced0afca7ce7320e403e8c688f
|
/scripts/19 Loc, iLoc, iax.py
|
a5a1fa2a19129eee8f2a071b17da822928002796
|
[] |
no_license
|
https://github.com/Angelpacman/Dataschool
|
e36b667e6677c767f96166f27a273762d91d1470
|
c30221337d7a8654b4e76a808ff5957308255cfb
|
refs/heads/master
| 2022-11-10T03:35:12.203224 | 2020-06-26T04:20:14 | 2020-06-26T04:20:14 | 268,156,678 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # How do I select multiple rows and columns?
# - Basically loc, iloc and ix are methods for selecting rows and columns
# In[1]:
import pandas as pd
# In[2]:
#ufo = pd.read_csv('http://bit.ly/uforeports')
ufo = pd.read_csv('uforeports.csv')
ufo
# ## Loc
# - Loc filters rows and selects columns by label (in pandas, "label" means the index when selecting rows and the column name when selecting columns)
#
# - Loc is used as follows: .loc[ which rows I want , which columns I want ]
# In[3]:
# The following example uses the ufo df with row 0 and all columns, represented by ":"
ufo.loc[0,:]
# In[4]:
# Here we want all the rows of the "City" column
ufo.loc[:,"City"]
# - Specific rows of the df can be selected by passing them to the loc method in a list:
# In[5]:
# In this case we choose all the columns ":" of the rows with index = 0,1,2
ufo.loc[[0,1,2],:]
# - Several consecutive rows can be selected by declaring the interval:
# In[6]:
# This form gives a result equivalent to the previous statement
ufo.loc[0:2,:]
# #### IMPORTANT: note that the selection 0:2 does NOT exclude index = 2
# - Passing a list of all the labels we want also works for the columns:
# In[7]:
# In this example we select all the rows of the "City" and "State" columns
ufo.loc[:,["City","State"]]
# - Columns can also be passed as a consecutive range:
# In[8]:
# Here we get rows 0 to 2 of all the columns from "City" through "State"
ufo.loc[0:2,"City":"State"]
# - Since none of this has been assigned to a variable, the previous result can be obtained with the .drop() method, removing the "Time" column on axis=1
# In[9]:
ufo.head(3).drop('Time', axis = 1)
# - Filtering the Oakland sightings using the boolean series that is True where the condition matches:
# In[10]:
ufo[ufo.City == 'Oakland']
# - The same filter can be obtained with loc in a fairly similar way:
# In[11]:
ufo.loc[ufo.City == 'Oakland', :]
# ## iLoc
# - iloc selects rows and columns by integer position:
# In[12]:
# This selects all the rows of the columns at positions 0 and 3
ufo.iloc[:, [0,3]]
# - A list can also be passed to iloc, as well as a consecutive range of positions
# In[13]:
ufo.iloc[:,0:3]
# #### Note that this method DOES exclude the column set as the limit (position = 3, i.e. the fourth column)
# In[14]:
# Suppose we want all the rows at positions 0 to 2, then:
ufo.iloc[0:3, :]
# ## ix function (deprecated)
# - The ix function is obsolete in pandas, at least since version 1.0
# In[15]:
drinks = pd.read_csv('drinks.csv', index_col = 'country')
drinks
# In[16]:
drinks.ix['Albania', 0]
# In[17]:
drinks.ix[1, "beer_servings"]
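# In[18]:
# (Added note) On pandas >= 1.0 the .ix calls in this section raise
# AttributeError. Mixed label/position lookups can be rewritten with loc
# plus the index and columns attributes:
drinks.loc['Albania', drinks.columns[0]]        # label row, positional column
drinks.loc[drinks.index[1], 'beer_servings']    # positional row, label column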
# In[19]:
drinks.ix['Albania':'Andorra',0:2]
# In[20]:
drinks.loc['Albania':'Andorra', 'beer_servings':'spirit_servings']
# In[21]:
drinks.iloc[1:4, 0:2]
# In[ ]:
|
UTF-8
|
Python
| false | false | 3,344 |
py
| 27 |
19 Loc, iLoc, iax.py
| 23 | 0.691709 | 0.669859 | 0 | 173 | 18.289017 | 241 |
LXL1314/d2l
| 18,640,158,073,443 |
1fd2c4ea64984d9ab16b85cc4ecf0b23dc2f69f6
|
7805346e05b101a02a1c2df7927577fc909a5ce6
|
/chapter3_deep-learning-basics/softmax_fashion_mnist.py
|
ab2c642421f786973abb58de219757497e5f331b
|
[] |
no_license
|
https://github.com/LXL1314/d2l
|
9a9819a576a79fb562518a3564ded36771addeaf
|
cfdec5c771cd655571ebf7a143407b0155fe9cc0
|
refs/heads/master
| 2022-10-18T18:08:13.971814 | 2019-08-04T02:59:42 | 2019-08-04T02:59:42 | 200,372,631 | 0 | 1 | null | false | 2022-10-09T00:35:06 | 2019-08-03T12:23:50 | 2019-08-04T03:03:23 | 2022-10-09T00:35:06 | 375,827 | 0 | 1 | 1 |
Jupyter Notebook
| false | false |
from mxnet import nd
import fashion_mnist as fm
batch_size = 256
train_iter, test_iter = fm.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = nd.random.normal(scale=0.01, shape=(num_inputs, num_outputs))
b = nd.zeros(num_outputs)
W.attach_grad()
b.attach_grad()
def softmax(X):
X_exp = X.exp()
partition = X_exp.sum(axis=1, keepdims=True)
return X_exp/partition
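# (Added note) X.exp() can overflow for large logits; subtracting the per-row
# max first is the standard numerically stable variant and gives the same
# probabilities. Shown here as an alternative, not used by train_ch3 below.
def stable_softmax(X):
    shifted = X - X.max(axis=1, keepdims=True)
    X_exp = shifted.exp()
    return X_exp / X_exp.sum(axis=1, keepdims=True)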
def net(X):
return softmax(nd.dot(X.reshape((-1, num_inputs)), W) + b)
def cross_entropy(y_hat, y):
return -nd.pick(y_hat, y).log()
def accuracy(y_hat, y):
return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()
num_epochs, lr = 10, 0.01
fm.train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)
for X, y in test_iter:
break
true_labels = fm.get_fashion_mnist_labels(y.asnumpy())
pre_labels = fm.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
title = [true + '\n' + pre for true, pre in zip(true_labels, pre_labels)]
fm.show_fashion_mnist(X[:10], title[:10])
|
UTF-8
|
Python
| false | false | 1,043 |
py
| 40 |
softmax_fashion_mnist.py
| 38 | 0.670182 | 0.644295 | 0 | 38 | 26.447368 | 91 |
deweerdt/condure
| 13,889,924,241,740 |
51aebbc1030545f6f22350386a49095124f72892
|
ff5c1d83496e34f8dce74d439e41798c25177c1b
|
/examples/streamhandler.py
|
c5a278fb328374d93dc9fe32c0793842a4826466
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/deweerdt/condure
|
abea0c88cbb71c37cf8384a9cc03e3ae67d8da4f
|
7f2f9e3987781ee31e3130f918d5bb0ad7769df4
|
refs/heads/master
| 2023-08-26T01:36:01.314990 | 2021-10-25T05:33:40 | 2021-10-25T05:33:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# this handler responds to every request with "hello world"
import os
import tnetstring
import zmq
instance_id = 'streamhandler.{}'.format(os.getpid()).encode('utf-8')
ctx = zmq.Context()
in_sock = ctx.socket(zmq.PULL)
in_sock.connect('ipc://client-out')
in_stream_sock = ctx.socket(zmq.ROUTER)
in_stream_sock.identity = instance_id
in_stream_sock.connect('ipc://client-out-stream')
out_sock = ctx.socket(zmq.PUB)
out_sock.connect('ipc://client-in')
poller = zmq.Poller()
poller.register(in_sock, zmq.POLLIN)
poller.register(in_stream_sock, zmq.POLLIN)
while True:
socks = dict(poller.poll(None))
if socks.get(in_sock) == zmq.POLLIN:
m_raw = in_sock.recv()
elif socks.get(in_stream_sock) == zmq.POLLIN:
m_list = in_stream_sock.recv_multipart()
m_raw = m_list[2]
else:
continue
req = tnetstring.loads(m_raw[1:])
print('IN {}'.format(req))
if req.get(b'type'):
# skip all non-data messages
continue
if req.get(b'uri', b'').startswith(b'ws'):
resp = {}
resp[b'from'] = instance_id
resp[b'id'] = req[b'id']
resp[b'seq'] = 0
resp[b'code'] = 101
resp[b'reason'] = b'Switching Protocols'
resp[b'credits'] = 1024
print('OUT {} {}'.format(req[b'from'], resp))
out_sock.send(req[b'from'] + b' T' + tnetstring.dumps(resp))
resp = {}
resp[b'from'] = instance_id
resp[b'id'] = req[b'id']
resp[b'seq'] = 1
resp[b'body'] = b'hello world'
print('OUT {} {}'.format(req[b'from'], resp))
out_sock.send(req[b'from'] + b' T' + tnetstring.dumps(resp))
resp = {}
resp[b'from'] = instance_id
resp[b'id'] = req[b'id']
resp[b'seq'] = 2
resp[b'type'] = b'close'
print('OUT {} {}'.format(req[b'from'], resp))
out_sock.send(req[b'from'] + b' T' + tnetstring.dumps(resp))
else:
resp = {}
resp[b'from'] = instance_id
resp[b'id'] = req[b'id']
resp[b'seq'] = 0
resp[b'code'] = 200
resp[b'reason'] = b'OK'
resp[b'headers'] = [[b'Content-Type', b'text/plain']]
resp[b'more'] = True
resp[b'credits'] = 1024
print('OUT {} {}'.format(req[b'from'], resp))
out_sock.send(req[b'from'] + b' T' + tnetstring.dumps(resp))
resp = {}
resp[b'from'] = instance_id
resp[b'id'] = req[b'id']
resp[b'seq'] = 1
resp[b'body'] = b'hello world\n'
print('OUT {} {}'.format(req[b'from'], resp))
out_sock.send(req[b'from'] + b' T' + tnetstring.dumps(resp))
|
UTF-8
|
Python
| false | false | 2,627 |
py
| 32 |
streamhandler.py
| 28 | 0.546631 | 0.538257 | 0 | 90 | 28.188889 | 68 |
jturibe/spicy_chorizo
| 11,458,972,766,111 |
10682108476a38523412eb658337d1969793db24
|
47b906d5ac12d831418c0fb5d8d1ea864df34621
|
/Server/server.py
|
078de4716018ac3c703d9edaaa4a4064a3d1fa38
|
[] |
no_license
|
https://github.com/jturibe/spicy_chorizo
|
f150f6b2e6cb41f28468e76457cd8152ad1caaaa
|
670c71a0be4a45302572ff1ad9022bbb988ff8bd
|
refs/heads/master
| 2022-04-03T16:58:08.078900 | 2020-02-13T13:09:49 | 2020-02-13T13:09:49 | 237,996,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import paho.mqtt.client as mqtt
import ssl
import json
from firebase import firebase
import datetime
import statistics
import numpy as np
import firebase_admin
from firebase_admin import db
from firebase_admin import credentials
from firebase_admin import messaging
import matplotlib.pyplot as plt
import seaborn
import base64
import time
import calendar
# Connect to Firebase
cred = credentials.Certificate("Firebase/spicychorizo-794f1-firebase-adminsdk-dckj3-acd1fd6dc2.json")
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://spicychorizo-794f1.firebaseio.com'
})
now = datetime.datetime.now()
last_hour = now.hour
last_day = now.weekday()
#######################################################################################################
#-----------------------------Helper functions--------------------------------------------------------
#######################################################################################################
def list_to_dict(raw_data):
if raw_data is None:
return {}
if type(raw_data) == type({}):
return raw_data
dict = {}
for i in range(len(raw_data)):
if raw_data[i] != None:
if not np.isnan(raw_data[i]):
dict[str(i)] = raw_data[i]
return dict
def filter_none(lst):
res = []
for val in lst:
if val != None :
res.append(val)
return res
#######################################################################################################
#-----------------------------Graph creation--------------------------------------------------------
#######################################################################################################
def update_week_hour_graphs():
ref = db.reference('/l_week/week_hour_AVG')
    # Retrieve data from Firebase
humidity_values = list_to_dict(ref.child('humidity').get())
temperature_values = list_to_dict(ref.child('temperature').get())
ref = db.reference('/user_settings')
settings = ref.get()
if(settings is None):
settings = { 'humidity_max' : 70,
'humidity_min' : 60,
'temperature_max' : 18,
'temperature_min' : 7
}
    # Note the argument order: the graph helpers take (values, lower, upper)
    average_week_hour_graph_temp(temperature_values, settings['temperature_min'], settings['temperature_max'])
    average_week_hour_graph_hum(humidity_values, settings['humidity_min'], settings['humidity_max'])
def average_week_hour_graph_temp(temperature_values, lower_range, upper_range):
graph_labels = []
graph_data = []
if temperature_values is not None:
for i in range(24):
graph_labels.append(str(i) + ":00")
if str(i) in temperature_values:
graph_data.append(temperature_values[str(i)])
else:
graph_data.append(np.NaN)
    else:
        for i in range(24):
            graph_labels.append(str(i) + ":00")
            graph_data.append(np.NaN)
time_labels = []
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(24)
allowed_ticks = [0, 4, 8, 12, 16, 20, 23]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=24)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.5)
upper_array = [upper_range]*24
lower_array = [lower_range]*24
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Average Temperature(°C)", fontweight="medium", fontsize="12")
plt.xlabel("Time(daily)", fontweight="medium", fontsize="12")
plt.title("Temperature(°C) per hour, averaged over 7 days", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig('average_week_hour_graph_temp.png')
plt.clf()
with open("average_week_hour_graph_temp.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_week_hour_graph_temp = db.reference('/graphs_temp')
ref_average_week_hour_graph_temp.update({'average_week_hour_graph': image_string.decode()})
def average_week_hour_graph_hum(humidity_values, lower_range, upper_range):
graph_labels = []
graph_data = []
if humidity_values is not None:
for i in range(24):
graph_labels.append(str(i) + ":00")
if str(i) in humidity_values:
graph_data.append(humidity_values[str(i)])
else:
graph_data.append(np.NaN)
    else:
        for i in range(24):
            graph_labels.append(str(i) + ":00")
            graph_data.append(np.NaN)
time_labels = []
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(24)
allowed_ticks = [0, 4, 8, 12, 16, 20, 23]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=24)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.5)
upper_array = [upper_range]*24
lower_array = [lower_range]*24
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Average Humidity(%)", fontweight="medium", fontsize="12")
plt.xlabel("Time(hourly)", fontweight="medium", fontsize="12")
plt.title("Relative Humidity(%) per hour, averaged over 7 days", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig('average_week_hour_graph_hum.png')
plt.clf()
with open("average_week_hour_graph_hum.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_week_hour_graph_hum = db.reference('/graphs_hum')
ref_average_week_hour_graph_hum.update({'average_week_hour_graph': image_string.decode()})
def update_day_graphs(cur_day):
ref = db.reference('/l_week/week_AVG/')
#Retrieve data from firebase
humidity_values = list_to_dict(ref.child('humidity').get())
temperature_values = list_to_dict(ref.child('temperature').get())
ref = db.reference('/user_settings')
settings = ref.get()
if(settings is None):
settings = { 'humidity_max' : 70,
'humidity_min' : 60,
'temperature_max' : 18,
'temperature_min' : 7
}
average_day_graph_temp(cur_day, temperature_values, settings['temperature_min'], settings['temperature_max'])
average_day_graph_hum(cur_day, humidity_values, settings['humidity_min'], settings['humidity_max'])
def average_day_graph_temp(day, temperature_values, lower_range, upper_range):
graph_labels = []
graph_data = []
if temperature_values is not None:
for i in range(day + 1, 7):
graph_labels.append(calendar.day_name[i])
if str(i) in temperature_values:
graph_data.append(temperature_values[str(i)])
else:
graph_data.append(np.NaN)
for i in range(0, day + 1):
graph_labels.append(calendar.day_name[i])
if str(i) in temperature_values:
graph_data.append(temperature_values[str(i)])
else:
graph_data.append(np.NaN)
    else:
        for i in range(7):
            graph_labels.append(calendar.day_name[i])
            graph_data.append(np.NaN)
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(7)
allowed_ticks = [0, 1, 2, 3, 4, 5, 6]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=7)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.5)
upper_array = [upper_range]*7
lower_array = [lower_range]*7
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Average Temperature(°C)", fontweight="medium", fontsize="12")
plt.xlabel("Time(daily)", fontweight="medium", fontsize="12")
plt.title("Average Temperature(°C) per day", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig('average_day_graph_temp.png')
plt.clf()
with open("average_day_graph_temp.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_hour_graph_temp = db.reference('/graphs_temp')
ref_average_hour_graph_temp.update({'average_day_graph': image_string.decode()})
def average_day_graph_hum(day, humidity_values, lower_range, upper_range):
graph_labels = []
graph_data = []
if humidity_values is not None:
for i in range(day + 1, 7):
graph_labels.append(calendar.day_name[i])
if str(i) in humidity_values:
graph_data.append(humidity_values[str(i)])
else:
graph_data.append(np.NaN)
for i in range(0, day + 1):
graph_labels.append(calendar.day_name[i])
if str(i) in humidity_values:
graph_data.append(humidity_values[str(i)])
else:
graph_data.append(np.NaN)
    else:
        for i in range(7):
            graph_labels.append(calendar.day_name[i])
            graph_data.append(np.NaN)
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(7)
allowed_ticks = [0, 1, 2, 3, 4, 5, 6]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=7)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.5)
upper_array = [upper_range]*7
lower_array = [lower_range]*7
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Average Humidity(%)", fontweight="medium", fontsize="12")
plt.xlabel("Time(daily)", fontweight="medium", fontsize="12")
plt.title("Average Relative Humidity(%) per day", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig('average_day_graph_hum.png')
plt.clf()
with open("average_day_graph_hum.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_hour_graph_temp = db.reference('/graphs_hum')
ref_average_hour_graph_temp.update({'average_day_graph': image_string.decode()})
def update_hour_graphs(cur_hour, cur_day):
ref = db.reference('/l_week/hour_AVG/weekday_' + str(cur_day))
    #Retrieve data from Firebase
humidity_values_today = ref.child('humidity').get()
temperature_values_today = ref.child('temperature').get()
humidity_values_today = list_to_dict(humidity_values_today)
temperature_values_today = list_to_dict(temperature_values_today)
ref = db.reference('/l_week/hour_AVG/weekday_' + str((cur_day-1)%7))
    #Retrieve data from Firebase
humidity_values_yesterday = ref.child('humidity').get()
temperature_values_yesterday = ref.child('temperature').get()
humidity_values_yesterday = list_to_dict(humidity_values_yesterday)
temperature_values_yesterday = list_to_dict(temperature_values_yesterday)
ref = db.reference('/user_settings')
settings = ref.get()
if(settings is None):
settings = { 'humidity_max' : 70,
'humidity_min' : 60,
'temperature_max' : 18,
'temperature_min' : 7
}
average_hour_graph_temp(cur_hour, cur_day, temperature_values_yesterday, temperature_values_today, settings['temperature_max'], settings['temperature_min'])
average_hour_graph_hum(cur_hour, cur_day, humidity_values_yesterday, humidity_values_today, settings['humidity_max'], settings['humidity_min'])
def average_hour_graph_temp(hour, day, temp_values_yesterday, temp_values_today, upper_range, lower_range): #current hour, day of today
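    # Builds a rolling 24-hour window ending at the current hour: hours (hour+1)..23 come from
    # yesterday's per-hour averages, 0..(hour-1) from today's, and the last point is the live reading.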
graph_labels = []
graph_data = []
if temp_values_yesterday is not None:
for i in range(hour + 1, 24): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
if str(i) in temp_values_yesterday:
graph_data.append(temp_values_yesterday[str(i)])
else:
graph_data.append(np.NaN)
else:
for i in range(hour + 1, 24): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
graph_data.append(np.NaN)
if temp_values_today is not None:
for i in range(0, hour): #take the required hours from today
graph_labels.append(str(i) + ":00")
if str(i) in temp_values_today:
graph_data.append(temp_values_today[str(i)])
else:
graph_data.append(np.NaN)
else:
for i in range(0, hour): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
graph_data.append(np.NaN)
cur_vals = db.reference('/current_measurement').get()
graph_labels.append(str(hour) + ":00")
graph_data.append(cur_vals['temperature'])
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(24)
allowed_ticks = [0, 4, 8, 12, 16, 20, 23]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=24)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.5)
upper_array = [upper_range]*24
lower_array = [lower_range]*24
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Temperature(°C)", fontweight="medium", fontsize="12")
plt.xlabel("Time(hourly)", fontweight="medium", fontsize="12")
plt.title("Average Temperature(°C) per hour", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig('average_hour_graph_temp.png')
plt.clf()
with open("average_hour_graph_temp.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_hour_graph_temp = db.reference('/graphs_temp')
ref_average_hour_graph_temp.update({'average_hour_graph': image_string.decode()})
def average_hour_graph_hum(hour, day, hum_values_yesterday, hum_values_today, upper_range, lower_range): #current hour, day of today
# ref_today = db.reference('/l_week/hour_AVG/weekday_' + str(day))
# ref_yesterday = db.reference('/l_week/hour_AVG/weekday_' + str((day-1)%7))
#
# light_values_today = ref_today.child('light').get()
# light_values_yesterday = ref_yesterday.child('light').get()
graph_labels = []
graph_data = []
# graph_data[:,:] = np.NaN
if hum_values_yesterday is not None:
for i in range(hour + 1, 24): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
if str(i) in hum_values_yesterday:
graph_data.append(hum_values_yesterday[str(i)])
else:
graph_data.append(np.NaN)
else:
for i in range(hour + 1, 24): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
graph_data.append(np.NaN)
if hum_values_today is not None:
for i in range(0, hour): #take the required hours from today
graph_labels.append(str(i) + ":00")
if str(i) in hum_values_today:
graph_data.append(hum_values_today[str(i)])
else:
graph_data.append(np.NaN)
else:
for i in range(0, hour): #take the required hours from yesterday
graph_labels.append(str(i) + ":00")
graph_data.append(np.NaN)
cur_vals = db.reference('/current_measurement').get()
graph_labels.append(str(hour) + ":00")
graph_data.append(cur_vals['humidity'])
graph_data = np.asarray(graph_data)
series = np.array(graph_data).astype(np.double)
mask = np.isfinite(series)
xi = np.arange(24)
    allowed_ticks = [0, 4, 8, 12, 16, 20, 23]
time_labels = []
for tick in allowed_ticks:
time_labels.append(graph_labels[tick])
plot = plt.plot(xi[mask], graph_data[mask], linestyle='-', color='#000000',linewidth=2)
plt.xticks(allowed_ticks, time_labels)
plt.locator_params(axis='x', nbins=24)
#plt.fill_between(xi[mask], graph_data[mask], y2=0, facecolor='#4D071D', alpha=0.25)
upper_array = [upper_range]*24
lower_array = [lower_range]*24
upper = plt.plot(xi, upper_array, linestyle='--', color='#808080',linewidth=1.25)
lower = plt.plot(xi, lower_array, linestyle='--', color='#808080',linewidth=1.25)
plt.fill_between(xi, upper_array, y2=lower_array, facecolor='#C5FFA8', alpha=0.25)
plt.ylabel("Relative Humidity(%)", fontweight="medium", fontsize="12")
plt.xlabel("Time(hourly)", fontweight="medium", fontsize="12")
plt.title("Average Relative Humidity(%) per hour", fontweight="medium", fontsize="12")
axes = plt.gca()
axes.set_ylim([min([graph_data[mask].min()*0.8,lower_range*0.8]),max([graph_data[mask].max(),upper_range]) + abs(min([graph_data[mask].min()*0.2,lower_range*0.2]))])
seaborn.despine(left=True, bottom=True, right=True)
plt.savefig("average_hour_graph_hum.png")
plt.clf()
with open("average_hour_graph_hum.png", "rb") as img_file:
image_string = base64.b64encode(img_file.read())
ref_average_hour_graph_temp = db.reference('/graphs_hum')
ref_average_hour_graph_temp.update({'average_hour_graph': image_string.decode()})
#######################################################################################################
#-----------------------------Data processing--------------------------------------------------------
#######################################################################################################
#Calculate the average value for the hour
def average_hour(hour, day):
ref = db.reference('/l_hour/RAW/')
    #Retrieve RAW data about the last hour from Firebase
light_values = ref.child('light').get()
humidity_values = ref.child('humidity').get()
temperature_values = ref.child('temperature').get()
ref = db.reference('/l_week/hour_AVG/weekday_' + str(day))
if light_values is not None: #If there is any data, process it. Otherwise nothing happens
light_values = list_to_dict(light_values) #Ensure data is a dict
light_values = light_values.values() #Extract values
if len(light_values) != 0: #Honestly just a cautionary check
light_avg = statistics.mean(light_values) #Calculate the average
else:
light_avg = None
ref_light = ref.child('light')
ref_light.update({hour: light_avg})
if humidity_values is not None:
humidity_values = list_to_dict(humidity_values)
humidity_values = humidity_values.values()
if len(humidity_values) != 0:
humidity_avg = statistics.mean(humidity_values)
else:
humidity_avg = None
ref_humidity = ref.child('humidity')
ref_humidity.update({hour: humidity_avg})
    if temperature_values is not None:
temperature_values = list_to_dict(temperature_values)
temperature_values = temperature_values.values()
if len(temperature_values) != 0:
temperature_avg = statistics.mean(temperature_values)
else:
temperature_avg = None
ref_temperature = ref.child('temperature')
ref_temperature.update({hour: temperature_avg})
#Calculate the average value for the day
def average_day(day):
ref = db.reference('/l_week/hour_AVG/weekday_' + str(day))
    #Retrieve data from Firebase
light_values = ref.child('light').get()
humidity_values = ref.child('humidity').get()
temperature_values = ref.child('temperature').get()
#Set upload reference
ref = db.reference('/l_week/week_AVG')
ref_light = ref.child('light')
ref_humidity = ref.child('humidity')
ref_temperature = ref.child('temperature')
#Check for None values
if light_values is not None:
light_values = list_to_dict(light_values)
light_values = light_values.values() #Extract values
if len(light_values) != 0:
light_avg = statistics.mean(light_values)
else:
light_avg = None
ref_light.update({day: light_avg})
if humidity_values is not None:
humidity_values = list_to_dict(humidity_values)
humidity_values = humidity_values.values() #Extract values
if len(humidity_values) != 0:
humidity_avg = statistics.mean(humidity_values)
else:
humidity_avg = None
ref_humidity.update({day: humidity_avg})
if temperature_values is not None:
temperature_values = list_to_dict(temperature_values)
temperature_values = temperature_values.values() #Extract values
if len(temperature_values) != 0:
temperature_avg = statistics.mean(temperature_values)
else:
temperature_avg = None
ref_temperature.update({day: temperature_avg})
#Calculate the average value for every hour over the period of the last week
def average_hour_over_day():
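    # week_record axes: [metric (0=light, 1=humidity, 2=temperature), weekday 0-6, hour 0-23];
    # np.nanmean over axis=1 below averages each hour across the seven days, ignoring missing days.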
week_record = np.empty([3, 7, 24])
week_record[:,:,:] = np.NaN
ref = db.reference('/l_week/hour_AVG')
data = ref.get()
if data is not None:
for day in range(7):
if 'weekday_' + str(day) in data:
if 'light' in data['weekday_' + str(day)]:
light_values = data['weekday_' + str(day)]['light']
light_values = list_to_dict(light_values)
else:
light_values = None
if 'humidity' in data['weekday_' + str(day)]:
humidity_values = data['weekday_' + str(day)]['humidity']
humidity_values = list_to_dict(humidity_values)
else:
humidity_values = None
if 'temperature' in data['weekday_' + str(day)]:
temperature_values = data['weekday_' + str(day)]['temperature']
temperature_values = list_to_dict(temperature_values)
else:
temperature_values = None
for i in range(24):
if light_values is not None:
if str(i) in light_values:
week_record[0, day, i] = light_values[str(i)]
if humidity_values is not None:
if str(i) in humidity_values:
week_record[1, day, i] = humidity_values[str(i)]
if temperature_values is not None:
if str(i) in temperature_values:
week_record[2, day, i] = temperature_values[str(i)]
day_hour_avg = np.nanmean(week_record, axis=1)
ref = db.reference('/l_week/week_hour_AVG')
ref_light = ref.child('light')
ref_hum = ref.child('humidity')
ref_temp = ref.child('temperature')
light_values = list_to_dict(day_hour_avg[0])
humidity_values = list_to_dict(day_hour_avg[1])
temperature_values = list_to_dict(day_hour_avg[2])
ref_light.update(light_values)
ref_hum.update(humidity_values)
ref_temp.update(temperature_values)
def initialise_averages(): #helper function to backfill the day averages
for day in range(7):
print(day)
average_day(day)
#######################################################################################################
#-----------------------------Notifications--------------------------------------------------------
#######################################################################################################
def send_notifications(received_payload):
ref = db.reference('/notifis')
notif_data_temp = ref.child('temperature').get()
if(notif_data_temp is None):
notif_data_temp = { 'last_event_notified': 0,
'last_trend_notified': 0,
'trend_high_perc': 0,
'trend_low_perc': 0
}
notif_data_hum = ref.child('humidity').get()
if(notif_data_hum is None):
notif_data_hum = { 'last_event_notified': 0,
'last_trend_notified': 0,
'trend_high_perc': 0,
'trend_low_perc': 0
}
cur_humidity = received_payload['humidity']
cur_temperature = received_payload['temperature']
ref_settings = db.reference('/user_settings')
settings = ref_settings.get()
    if(settings is None): #If there are no user settings in the database. In the final
                          #product this would flag a separate notification saying that the
                          #temperature is different from the one we recommend
settings = { 'humidity_max' : 30,
'humidity_min' : 10,
'temperature_max' : 30,
'temperature_min' : 5
}
if(cur_temperature > settings['temperature_max']): #temperature is more than desired
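        # 0.01666666666 ~= 1/60: each out-of-range reading moves the trend score by 1/60, so about
        # 60 consecutive readings saturate it and ~30 trip the >= 0.5 threshold checks below.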
if not(notif_data_temp['trend_high_perc'] >= 1):
notif_data_temp['trend_high_perc'] = notif_data_temp['trend_high_perc'] + 0.01666666666 #add to percentage of time spent hot
if not(notif_data_temp['trend_low_perc'] <= 0):
            notif_data_temp['trend_low_perc'] = notif_data_temp['trend_low_perc'] - 0.01666666666 #take away from percentage of time spent cold
if (time.time() - notif_data_temp['last_trend_notified']) > 300: #a full day or week (5 minutes for demo) has passed since the last trend notification
if(notif_data_temp['trend_high_perc']>= 0.5): #If there is a trend of high temperature in the last 5 min
topic = "event_updates"
message = messaging.Message(
notification=messaging.Notification(
title='Careful!',
body='Your wine has been overheating for too long!',
),
topic=topic,
)
response = messaging.send(message)
notif_data_temp['last_trend_notified'] = time.time()
    elif(cur_temperature < settings['temperature_min']): #temperature is less than desired
        if not(notif_data_temp['trend_low_perc'] >= 1):
            notif_data_temp['trend_low_perc'] = notif_data_temp['trend_low_perc'] + 0.01666666666 #add to percentage of time spent cold
        if not(notif_data_temp['trend_high_perc'] <= 0):
            notif_data_temp['trend_high_perc'] = notif_data_temp['trend_high_perc'] - 0.01666666666 #take away from percentage of time spent hot
        if (time.time() - notif_data_temp['last_trend_notified']) > 300: #a full day or week (5 minutes for demo) has passed since the last trend notification
if(notif_data_temp['trend_low_perc']>= 0.5): #If there is a trend of high temperature in the last 5 min
topic = "event_updates"
message = messaging.Message(
notification=messaging.Notification(
title='Careful!',
body='Your wine has been freezing for too long!',
),
topic=topic,
)
response = messaging.send(message)
notif_data_temp['last_trend_notified'] = time.time()
else:
        if not(notif_data_temp['trend_low_perc'] <= 0):
            notif_data_temp['trend_low_perc'] = notif_data_temp['trend_low_perc'] - 0.01666666666 #take away from percentage of time spent cold
        if not(notif_data_temp['trend_high_perc'] <= 0):
            notif_data_temp['trend_high_perc'] = notif_data_temp['trend_high_perc'] - 0.01666666666 #take away from percentage of time spent hot
if(cur_humidity > settings['humidity_max']): #humidity is more than desired
if not(notif_data_hum['trend_high_perc'] >= 1):
notif_data_hum['trend_high_perc'] = notif_data_hum['trend_high_perc'] + 0.01666666666 #add to percentage of time spent too moist
if not(notif_data_hum['trend_low_perc'] <= 0):
notif_data_hum['trend_low_perc'] = notif_data_hum['trend_low_perc'] - 0.01666666666 #take away from percentage of time spent too dry
if (time.time() - notif_data_hum['last_trend_notified']) > 300: #a full day or week (5 minutes for demo) has passed since the last trend notification
if(notif_data_hum['trend_high_perc']>= 0.5): #If there is a trend of high humidity in the last 5 min
topic = "event_updates"
message = messaging.Message(
notification=messaging.Notification(
title='Careful!',
body='Your wine storage is too humid!',
),
topic=topic,
)
response = messaging.send(message)
notif_data_hum['last_trend_notified'] = time.time()
elif(cur_humidity < settings['humidity_min']): #humidity is less than desired
if not(notif_data_hum['trend_low_perc'] >= 1):
notif_data_hum['trend_low_perc'] = notif_data_hum['trend_low_perc'] + 0.01666666666 #add to percentage of time spent too dry
if not(notif_data_hum['trend_high_perc'] <= 0):
notif_data_hum['trend_high_perc'] = notif_data_hum['trend_high_perc'] - 0.01666666666 #take away from percentage of time spent too moist
if (time.time() - notif_data_hum['last_trend_notified']) > 300: #a full day or week (5 minutes for demo) has passed since the last trend notification
if(notif_data_hum['trend_low_perc']>= 0.5): #If there is a trend of high humidity in the last 5 min
topic = "event_updates"
message = messaging.Message(
notification=messaging.Notification(
title='Careful!',
body='Your wine storage is not humid enough!',
),
topic=topic,
)
response = messaging.send(message)
notif_data_hum['last_trend_notified'] = time.time()
else:
if not(notif_data_hum['trend_low_perc'] <= 0):
notif_data_hum['trend_low_perc'] = notif_data_hum['trend_low_perc'] - 0.01666666666 #take away from percentage of time spent too dry
if not(notif_data_hum['trend_high_perc'] <= 0):
notif_data_hum['trend_high_perc'] = notif_data_hum['trend_high_perc'] - 0.01666666666 #take away from percentage of time spent too moist
ref.child('temperature').update(notif_data_temp)
ref.child('humidity').update(notif_data_hum)
def send_to_topic():
topic = "event_updates"
message = messaging.Message(
notification=messaging.Notification(
title='Warning:',
body='Temperature is too high for your wine',
),
topic=topic,
)
# Send a message to the devices subscribed to the provided topic.
response = messaging.send(message)
# Response is a message ID string.
print('Successfully sent message:', response)
# [END send_to_topic]
#######################################################################################################
#-----------------------------On message--------------------------------------------------------
#######################################################################################################
def on_message(client, userdata, message):
#Global variables
global last_hour
global last_day
print('Received a message')
received_payload = json.loads(message.payload)
#Current time
now = datetime.datetime.now()
cur_minute = now.minute
cur_hour = now.hour
cur_day = now.weekday()
#Check if averages need to be updated
if(cur_hour != last_hour):
print('The hour has changed -> Updating averages and graphs')
average_hour(last_hour, last_day)
average_hour_over_day()
if(cur_day != last_day):
print('The day has changed -> Updating averages and graphs')
average_day(last_day)
# print('The hour has changed -> Updating averages and graphs')
# average_hour(last_hour, last_day)
# average_hour_over_day()
# print('The day has changed -> Updating averages and graphs')
# average_day(last_day)
# print('------Done-------')
#Retreive the individual values
cur_light = received_payload['light']
cur_humidity = received_payload['humidity']
cur_temperature = received_payload['temperature']
#Compress light value for user display
compressed_light = 'Bright'
if(cur_light<10):
compressed_light = 'Dark'
elif(cur_light<50):
compressed_light = 'Dim'
print(received_payload)
print('Light levels compressed')
#Upload to current data
ref = db.reference('/current_measurement')
ref.update({
'light': compressed_light,
'humidity': cur_humidity,
'temperature': cur_temperature
})
print('Current measurements updated')
    #Upload to hourly backlog
ref = db.reference('/l_hour/RAW')
ref.child('light').update({cur_minute: cur_light})
ref.child('humidity').update({cur_minute: cur_humidity})
ref.child('temperature').update({cur_minute: cur_temperature})
#Update last 24h average graphs since they contain last value as the current one
update_week_hour_graphs()
print('Updated Week hour graphs')
update_day_graphs(cur_day)
print('Updated last day average graphs')
update_hour_graphs(cur_hour, cur_day)
print('Updated last 24h average graphs')
send_notifications(received_payload)
print('Checked and sent notifications')
last_hour = cur_hour
last_day = cur_day
print('Finished On Message\n\n')
#######################################################################################################
#-----------------------------Main connections and actions on server startup--------------------------------------------------------
#######################################################################################################
client = mqtt.Client()
client.tls_set(ca_certs="mosquitto.org.crt", certfile="client.crt",keyfile="client.key", tls_version=ssl.PROTOCOL_TLSv1_2)
con_code = client.connect("test.mosquitto.org",port=8884)
if not con_code:
client.subscribe("IC.embedded/spicy_chorizo/#")
print("Subscribed to IC.embedded/spicy_chorizo/#")
else:
print(mqtt.error_string(con_code))
client.on_message = on_message
client.subscribe("IC.embedded/spicy_chorizo/#")
client.loop_forever()
print("Done")
# upper_range = 27
# lower_range = 18
# yesterday = list_to_dict([20,21,20,21,20,21,20,21,20,21,20,21,20,21])
# today = list_to_dict([20,21,20,21,20,21,20,21,20,21,20,21,20,21])
# average_hour_graph_temp(last_hour, last_day, yesterday, today, upper_range, lower_range)
# average_hour_graph_hum(last_hour, last_day, yesterday, today, upper_range, lower_range)
|
UTF-8
|
Python
| false | false | 38,324 |
py
| 12 |
server.py
| 6 | 0.586278 | 0.564408 | 0 | 934 | 40.025696 | 169 |
linhduongtuan/Kaggle-2020-Alaska2
| 3,633,542,352,357 |
600daa6944ca3d0538179c80f7b2c3ed97923e1f
|
c8026a5a31befa11c25e6d00d719af349635a4c5
|
/submissions/make_submissions_lgbm_gs.py
|
ade6cc9145643743c28e22c207aaaab9e8b8ee74
|
[
"MIT"
] |
permissive
|
https://github.com/linhduongtuan/Kaggle-2020-Alaska2
|
97cb890b8328d0e99d0a9225364f9b6af4f6a70c
|
3c1f5e8e564c9f04423beef69244fc74168f88ca
|
refs/heads/master
| 2023-01-11T09:10:15.601495 | 2020-08-09T13:23:17 | 2020-08-09T13:23:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
# For reading, visualizing, and preprocessing data
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from pytorch_toolbelt.utils import fs
from sklearn.metrics import make_scorer
from sklearn.model_selection import GroupKFold, RandomizedSearchCV, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA  # needed by the optional PCA branch below
from alaska2 import get_holdout, INPUT_IMAGE_KEY, get_test_dataset
from alaska2.metric import alaska_weighted_auc
from alaska2.submissions import parse_classifier_probas, sigmoid, parse_array, parse_and_softmax, get_x_y_for_stacking
from submissions.eval_tta import get_predictions_csv
from submissions.make_submissions_averaging import compute_checksum_v2
import lightgbm as lgb
def wauc_metric(y_true, y_pred):
wauc = alaska_weighted_auc(y_true, y_pred)
return ("wauc", wauc, True)
def main():
output_dir = os.path.dirname(__file__)
experiments = [
"G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
"G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
"G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
"G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
# "H_Jul12_18_42_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16",
#
"K_Jul17_17_09_nr_rgb_tf_efficientnet_b6_ns_mish_fold0_local_rank_0_fp16",
"J_Jul19_20_10_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16",
"H_Jul11_16_37_nr_rgb_tf_efficientnet_b7_ns_mish_fold2_local_rank_0_fp16",
"K_Jul18_16_41_nr_rgb_tf_efficientnet_b6_ns_mish_fold3_local_rank_0_fp16"
#
#
]
holdout_predictions = get_predictions_csv(experiments, "cauc", "holdout", "d4")
test_predictions = get_predictions_csv(experiments, "cauc", "test", "d4")
checksum = compute_checksum_v2(experiments)
holdout_ds = get_holdout("", features=[INPUT_IMAGE_KEY])
image_ids = [fs.id_from_fname(x) for x in holdout_ds.images]
quality_h = F.one_hot(torch.tensor(holdout_ds.quality).long(), 3).numpy().astype(np.float32)
test_ds = get_test_dataset("", features=[INPUT_IMAGE_KEY])
quality_t = F.one_hot(torch.tensor(test_ds.quality).long(), 3).numpy().astype(np.float32)
with_logits = True
x, y = get_x_y_for_stacking(holdout_predictions, with_logits=with_logits, tta_logits=with_logits)
# Force target to be binary
y = (y > 0).astype(int)
print(x.shape, y.shape)
x_test, _ = get_x_y_for_stacking(test_predictions, with_logits=with_logits, tta_logits=with_logits)
print(x_test.shape)
if True:
sc = StandardScaler()
x = sc.fit_transform(x)
x_test = sc.transform(x_test)
if False:
sc = PCA(n_components=16)
x = sc.fit_transform(x)
x_test = sc.transform(x_test)
if True:
x = np.column_stack([x, quality_h])
x_test = np.column_stack([x_test, quality_t])
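    # Grouping the CV folds by image id keeps every prediction row for the same image in a single
    # fold, which should avoid leakage between train and validation splits (assumed intent).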
group_kfold = GroupKFold(n_splits=5)
params = {
"boosting_type": ["gbdt", "dart", "rf", "goss"],
"num_leaves": [16, 32, 64, 128],
"reg_alpha": [0, 0.01, 0.1, 0.5],
"reg_lambda": [0, 0.01, 0.1, 0.5],
"learning_rate": [0.001, 0.01, 0.1, 0.5],
"n_estimators": [32, 64, 126, 512],
"max_depth": [2, 4, 8],
"min_child_samples": [20, 40, 80, 100],
}
lgb_estimator = lgb.LGBMClassifier(objective="binary", silent=True)
random_search = RandomizedSearchCV(
lgb_estimator,
param_distributions=params,
scoring=make_scorer(alaska_weighted_auc, greater_is_better=True, needs_proba=True),
n_jobs=3,
n_iter=50,
cv=group_kfold.split(x, y, groups=image_ids),
verbose=2,
random_state=42,
)
# Here we go
random_search.fit(x, y)
test_pred = random_search.predict_proba(x_test)[:, 1]
print(test_pred)
submit_fname = os.path.join(output_dir, f"lgbm_gs_{random_search.best_score_:.4f}_{checksum}.csv")
df = pd.read_csv(test_predictions[0]).rename(columns={"image_id": "Id"})
df["Label"] = test_pred
df[["Id", "Label"]].to_csv(submit_fname, index=False)
print("\n All results:")
print(random_search.cv_results_)
print("\n Best estimator:")
print(random_search.best_estimator_)
print(random_search.best_score_)
print("\n Best hyperparameters:")
print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv("lgbm-random-grid-search-results-01.csv", index=False)
# print(model.feature_importances_)
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 4,657 |
py
| 218 |
make_submissions_lgbm_gs.py
| 161 | 0.649775 | 0.610049 | 0 | 132 | 34.280303 | 118 |
reparadocs/ExtraLettuce
| 9,431,748,194,933 |
3ea5d7e57bbbac18f09cdac3e2a4b921e7244ec7
|
fe19fe1495b93b9f11abe1b41d088c99fae28895
|
/accounts/migrations/0005_auto_20160117_0512.py
|
d61c502eec683c1049c3e48ddd19db828aac2d4a
|
[] |
no_license
|
https://github.com/reparadocs/ExtraLettuce
|
c464e5d2c100c4e2ba6a3e97e17d52f89a5b94f1
|
5e9f9f95d64e05b494f019ad6f8c4455ada51abe
|
refs/heads/master
| 2021-01-09T06:40:30.460078 | 2016-01-17T21:56:11 | 2016-01-17T21:56:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_account_token'),
]
operations = [
migrations.CreateModel(
name='Goal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('amount', models.IntegerField()),
],
),
migrations.AddField(
model_name='account',
name='bank_amount',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='account',
name='bank_name',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='goal',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
|
UTF-8
|
Python
| false | false | 1,094 |
py
| 10 |
0005_auto_20160117_0512.py
| 6 | 0.542962 | 0.531993 | 0 | 38 | 27.789474 | 114 |
alireza-ansaripour/quizyar
| 10,256,381,933,119 |
01eee450f4f3f0b60b30dea310ad8f5526b79ca6
|
9f604e9700f906f95d180055709e123f2edfb3de
|
/Quizyar/quiz/views.py
|
b336a39ba2091ddf5e5c0a4a74f2ac5c209dd3c9
|
[] |
no_license
|
https://github.com/alireza-ansaripour/quizyar
|
496c4ecc09e895791df6c1b34d5a57439c202132
|
8eab7d74c79825908de2ceb76e8627fb3889d361
|
refs/heads/master
| 2016-09-06T05:27:42.591868 | 2015-09-17T07:14:54 | 2015-09-17T07:14:54 | 42,368,950 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.http import HttpResponse
import datetime
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
from Quizyar.models import Teacher, Tag, Student
from rest_framework import permissions
from Quizyar.quiz.forms import AddQuizForm
from Quizyar.quiz.models import Question, Quiz, Result, Analyse, Answer
from Quizyar.quiz.permissions import IsOwnerOrReadOnly
from Quizyar.quiz.serializers import QuestionSerializer, QuestionSerializerForQuiz
from django.utils.crypto import get_random_string
__author__ = 'alireza'
class addquiz(APIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
def get(self, request):
if request.user.is_staff:
form = AddQuizForm()
teacher = Teacher.objects.get(username=request.user.username)
tags = teacher.tag_set.all()
edit = True
return render(request, 'quiz/add_quiz_template.html', {'form': form, 'tags': tags, 'edit': edit})
else:
return render(request, "quiz/get_teacher_template.html")
def post(self, request, format=None):
if request.user.is_staff:
q = Quiz()
q.title = request.data['title']
q.time_to_show = request.data['time_to_show']
q.time_to_end = request.data['time_to_end']
q.teacher = Teacher.objects.get(username=request.user.username)
q.unique_id = get_random_string()
q.save()
return redirect('/accounts/profile/quiz')
else:
return redirect('/')
class quiz(APIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
def get(self, request):
print(request.user.username)
teacher = Teacher.objects.get(username=request.user.username)
tags = teacher.tag_set.all()
return render(request, 'quiz/quiz_template.html', {'tags': tags})
def post(self, request):
        serializer = QuestionSerializer(data=request.data, many=True)# a quiz contains many questions
if serializer.is_valid():
try:
questions = serializer.data
teacher = Teacher.objects.get(username=request.user.username)
quiz = teacher.quiz_set.all()
quiz = quiz[len(quiz)-1]
i = 1
for question in questions:
q = Question.objects.get(unique_id=question['unique_id'])
q.text = question['text']
q.answer = question['answer']
q.choices = question['choices']
q.kind = question['kind']
q.save()
quiz.questions.add(q)
a = Analyse()
a.quiz = quiz
a.question_number = i
a.save()
i += 1
except Exception as e:
print(e)
return HttpResponse('quiz added')
class question(APIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
def get(self, request):
teacher = Teacher.objects.get(username=request.user.username)
tag = teacher.tag_set.filter(text=request.GET['tag'])
# print(request.GET['tag'])
try:
            questions = Question.objects.filter(tag__in=tag)  # tag is a queryset, so use __in
serializer = QuestionSerializer(questions, many=True)
except Exception as e:
print(e)
return JSONResponse(serializer.data)
def post(self, request):
try:
serializer = QuestionSerializer(data=request.data)
if serializer.is_valid():
question = serializer.data
q = Question()
q.text = question['text']
q.answer = question['answer']
q.choices = question['choices']
q.kind = question['kind']
teacher = Teacher.objects.get(username=request.user.username)
quiz = teacher.quiz_set.all()[0]
q.quiz = quiz
q.tag = Tag.objects.get(text=question['tag'])
q.unique_id = get_random_string()
q.save()
else:
print(serializer.errors)
except Exception as e:
print(e)
return HttpResponse('done')
def put(self, request):
try:
serializer = QuestionSerializer(data=request.data)
if serializer.is_valid():
question = Question.objects.get(unique_id=serializer.data['unique_id'])
question.text = serializer.data['text']
question.kind = serializer.data['kind']
question.answer = serializer.data['answer']
question.choices = serializer.data['choices']
question.save()
else:
print(serializer.errors)
except Exception as e:
print(e)
return HttpResponse('done')
class tag(APIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
def get(self, request):
return HttpResponse('done')
@csrf_exempt
def post(self, request, format=None):
print('post')
print(request.data)
tag = Tag()
tag.text = request.data['text']
tag.teacher = Teacher.objects.get(username=request.user.username)
tag.save()
return HttpResponse('done')
class GetQuiz(APIView):
# @csrf_exempt
def post(self, request):
try:
unique_id = request.data['unique_id']
quiz = Quiz.objects.get(unique_id=unique_id)
questions = quiz.questions.all()
serializer = QuestionSerializer(questions, many=True)
return JSONResponse(serializer.data)
except Exception as e:
print(e)
class SubmitQuiz(APIView):
def post(self, request):
u_id = request.data['unique_id']
try:
quiz = Quiz.objects.get(unique_id=u_id)
questions = quiz.questions.all()
analyses = quiz.analyse_set.all()
serializer = QuestionSerializer(data=request.data['questions'], many=True)
numberOfQuestions = len(questions) * 3
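            # Scoring below: +3 per correct kind-'1' answer, -1 per wrong one, 0 if skipped, so
            # numberOfQuestions is really the maximum score; the grade is score / max * 100.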
score = 0
if serializer.is_valid():
answers = serializer.data
i = 0
for question in answers:
if question['kind'] == '1':
alanyse = analyses[i]
answer = questions[i].answer
if question['answer'] == 0:
alanyse.unanswered += 1
alanyse.save()
i += 1
continue
if question['answer'] == answer:
alanyse.answered += 1
alanyse.save()
score += 3
i += 1
if question['answer'] != answer:
alanyse.wrong += 1
alanyse.save()
score -= 1
i += 1
answer = Answer()
answer.quiz = quiz
answer.text = question['answer']
answer.student = Student.objects.get(username=request.user.username)
answer.save()
if question['kind'] == '2':
answer = Answer()
answer.quiz = quiz
answer.text = question['answer']
answer.student = Student.objects.get(username=request.user.username)
answer.save()
result = score/numberOfQuestions * 100
r = Result()
r.grade = result
r.quiz = quiz
r.student = Student.objects.get(username=request.user.username).first_name+' '+Student.objects.get(username=request.user.username).last_name
r.username = Student.objects.get(username=request.user.username).username
r.save()
return HttpResponse(score/numberOfQuestions)
except Exception as e:
print(e)
return HttpResponse(e)
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
|
UTF-8
|
Python
| false | false | 8,887 |
py
| 45 |
views.py
| 17 | 0.544166 | 0.54169 | 0 | 231 | 37.471861 | 156 |
qualorm/adventofcode
| 16,157,666,987,974 |
d0692245bd779e906c8a540e8d8a084dfddaa251
|
561fec987d13956f97579136fb18af62dd05f96b
|
/2020/18/18_1.py
|
b1970f420a331d5b11b1eb03e7ac14cd4ff4ca61
|
[] |
no_license
|
https://github.com/qualorm/adventofcode
|
a7f5052212b19017587bbf9180240e5425b8ee64
|
94b0dde342cd33400418a4ccd5258f620fe9d169
|
refs/heads/master
| 2023-04-22T07:34:28.816980 | 2021-12-14T12:16:48 | 2021-12-14T12:16:48 | 319,268,052 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def solve(eq):
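    # Part 1 rules: '+' and '*' have equal precedence, evaluated strictly left to right.
    # Strategy: if the expression has parentheses, recursively replace the outermost balanced
    # group with its solved value; otherwise fold the leftmost operator and recurse.
    # e.g. solve("1 + 2 * 3") -> "9" (left-to-right), not "7".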
i = 0
nested = 0
if '(' in eq or ')' in eq:
for i in range(len(eq)):
if eq[i] == '(':
nested += 1
if nested == 1:
ind_left = i
if eq[i] == ')':
nested -= 1
if nested == 0:
ind_right = i
if ind_left == 0 and ind_right == len(eq)-1:
new_eq = eq[ ind_left+1 : ind_right ]
# print("SOLVING 1: ", new_eq)
return solve(new_eq)
elif ind_left == 0 and ind_right < len(eq)-1:
new_eq = solve(eq[ ind_left+1 : ind_right ]) + eq[ind_right+1:]
# print("SOLVING 2: ", new_eq)
return solve(new_eq)
elif ind_left > 0 and ind_right == len(eq)-1:
new_eq = eq[:ind_left] + solve(eq[ ind_left+1 : ind_right ])
# print("SOLVING 3: ", new_eq)
return solve(new_eq)
else:
new_eq = eq[:ind_left] + solve(eq[ ind_left+1 : ind_right ]) + eq[ind_right+1:]
# print("SOLVING 4: ", new_eq)
return solve(new_eq)
else:
for i in range(len(eq)):
if eq[i] == '*':
left_int = int(eq.split(' * ')[0])
if eq.count(' ') == 2:
right_int = int(eq.split(' * ')[1])
return str(left_int*right_int)
else:
right_int = int(eq.split(' * ')[1].split(' ')[0])
new_eq = str(left_int*right_int) + eq[i+2+len(str(right_int)):]
return solve(new_eq)
if eq[i] == '+':
left_int = int(eq.split(' + ')[0])
if eq.count(' ') == 2:
right_int = int(eq.split(' + ')[1])
return str(left_int+right_int)
else:
right_int = int(eq.split(' + ')[1].split(' ')[0])
new_eq = str(left_int+right_int) + eq[i+2+len(str(right_int)):]
return solve(new_eq)
with open("day18.txt","r") as file:
f = file.read().splitlines()
ans = 0
for eq in f:
# print("Equation: ", eq, "Answer: ", solve(eq))
int_ans = int(solve(eq))
ans += int_ans
print("FINAL ANSWER: ", ans)
|
UTF-8
|
Python
| false | false | 2,493 |
py
| 89 |
18_1.py
| 88 | 0.380265 | 0.365423 | 0 | 65 | 37.369231 | 104 |
ImDaeseong/VocaKnow
| 6,786,048,362,472 |
703950ad661179548a2accf6247216d81500729d
|
781ea21caed5a6623af1d2cee91b8fe12eb84e3c
|
/convertExcel/readcsv/csv.py
|
e6becbc60e1a6217163e96bfbb2dede375d34249
|
[] |
no_license
|
https://github.com/ImDaeseong/VocaKnow
|
72b6ff303e7b954856a246c1727030881a11383c
|
6231083a88cecb446fbc2793e1961b527a1c4a87
|
refs/heads/master
| 2023-03-16T15:12:55.860029 | 2021-03-19T09:27:11 | 2021-03-19T09:27:11 | 106,758,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import openpyxl
if __name__ == '__main__':
file = open('E:/kata.txt', 'w', encoding='utf8')
xls = openpyxl.load_workbook("aa.xlsx")
sheet = xls["Sheet1"]
for row in sheet.rows:
index = row[0].row
# print(index)
if index == 1:
continue
sPart1 = row[1].value
sPart2 = row[2].value
sPart3 = row[3].value
sKorea = row[4].value
sIndo = row[5].value
sPron = row[6].value
# print('index:{}|sPart2:{}:sPart3:{}:sKorea:{}:sIndo:{}:sPron:{}'.format(index - 1, sPart2, sPart3, sKorea,
# sIndo, sPron))
file.write(str('{}|{}|{}|{}|{}|{}\n'.format(index - 1, sPart2, sPart3, sKorea, sIndo, sPron)))
file.close()
|
UTF-8
|
Python
| false | false | 735 |
py
| 4 |
csv.py
| 1 | 0.515646 | 0.487075 | 0 | 28 | 25.25 | 116 |
kerneltravel/tinman
| 4,827,543,282,300 |
7a815bf5978960a1f628e719b0ceea74080cef9c
|
ffd896db3fb79a117db50dc6197d0290084e0e39
|
/tinman/handlers/redis_handlers.py
|
af4100745a31826485cf0d394b4eb4914bb82661
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/kerneltravel/tinman
|
52141885204b247b155e35830744b84445c28b47
|
2a7a59aabba340e98a938cc5ce39c547e196fd63
|
refs/heads/master
| 2021-01-16T20:05:40.538515 | 2013-09-25T02:20:22 | 2013-09-25T02:20:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""The RedisRequestHandler uses tornado-redis to support Redis. It will
auto-establish a single redis connection when initializing the connection.
"""
import logging
from tornado import web
LOGGER = logging.getLogger(__name__)
class RedisRequestHandler(web.RequestHandler):
"""This request handler will connect to Redis on initialize if the
connection is not previously set. This handler uses the redis library for
synchronous redis use.
"""
CONFIG_DB = 'db'
REDIS = 'redis'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
def _new_redis_client(self):
"""Create a new redis client and assign it to the application.attributes
object for reuse later.
"""
if 'redis' not in globals():
import redis
LOGGER.info('Creating new Redis instance')
settings = self._redis_settings
LOGGER.debug('Connecting to redis: %r', settings)
client = redis.Redis(**settings)
self.application.attributes.add(self.REDIS, client)
@property
def _redis_settings(self):
"""Return the Redis settings from configuration as a dict, defaulting
to localhost:6379:0 if it's not set in configuration. The dict format
is set to be passed as kwargs into the Client object.
:rtype: dict
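
        Example (illustrative) Tornado application settings::

            {'redis': {'host': '127.0.0.1', 'port': 6379, 'db': 0}}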
"""
settings = self.application.settings.get('redis', dict())
return {'host': settings.get('host', self.REDIS_HOST),
'port': settings.get('port', self.REDIS_PORT),
self.CONFIG_DB: settings.get('db', self.REDIS_DB)}
def prepare(self):
"""Prepare RedisRequestHandler requests, ensuring that there is a
connected tornadoredis.Client object.
"""
super(RedisRequestHandler, self).prepare()
if self.REDIS not in self.application.attributes:
self._new_redis_client()
@property
def redis(self):
"""Return a handle to the active redis client.
        :rtype: redis.Redis
"""
if self.REDIS not in self.application.attributes:
self._new_redis_client()
return self.application.attributes.redis
class AsynchronousRedisRequestHandler(RedisRequestHandler):
"""This request handler will connect to Redis on initialize if the
connection is not previously set and uses the tornado-redis library for
asynchronous use.
"""
CONFIG_DB = 'selected_db'
def _new_redis_client(self):
"""Create a new redis client and assign it to the application.attributes
object for reuse later.
"""
if 'tornadoredis' not in globals():
import tornadoredis
LOGGER.info('Creating new Redis instance')
settings = self._redis_settings
LOGGER.debug('Connecting to redis: %r', settings)
client = tornadoredis.Client(**settings)
client.connect()
self.application.attributes.add(self.REDIS, client)
|
UTF-8
|
Python
| false | false | 2,978 |
py
| 9 |
redis_handlers.py
| 8 | 0.650772 | 0.647414 | 0 | 91 | 31.725275 | 80 |
haveitjoewei/eventfinder
| 7,224,135,019,297 |
3a3d5374fc5939544801b1ea9993ab794bf292b9
|
fa1070295bc0ede61bc743fca96181135d56522c
|
/hello/templatetags/map_category.py
|
45e830f72f81d7f3de9ce3c7bea1ab5b3cd628d3
|
[] |
no_license
|
https://github.com/haveitjoewei/eventfinder
|
da6aa1511af7385525c91faa2c133d09dba627cb
|
6ae5769d42f7ba28229fb255dd8b46d00f81bed1
|
refs/heads/master
| 2018-01-09T04:52:50.250292 | 2016-01-26T12:26:21 | 2016-01-26T12:26:21 | 49,937,334 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import template
register = template.Library()
@register.filter
def map_category(value):
categories = {
'101': 'Business & Professional',
'102': 'Science & Technology',
'103': 'Music',
'104': 'Film, Media & Entertainment',
'105': 'Performing & Visual Arts',
'106': 'Fashion & Beauty',
'107': 'Health & Wellness',
'108': 'Sports & Fitness',
'109': 'Travel & Outdoor',
'110': 'Food & Drink',
'111': 'Charity & Causes',
'112': 'Government & Politics',
'113': 'Community & Culture',
'114': 'Religion & Spirituality',
'115': 'Family & Education',
'116': 'Seasonal & Holiday',
'117': 'Home & Lifestyle',
'118': 'Auto, Boat & Air',
'119': 'Hobbies & Special Interest',
'199': 'Other'
}
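    # Note: an unknown code raises KeyError; categories.get(value, 'Other') would be a
    # forgiving alternative if lenient lookups are wanted.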
return categories[value]
|
UTF-8
|
Python
| false | false | 759 |
py
| 17 |
map_category.py
| 10 | 0.612648 | 0.533597 | 0 | 29 | 25.206897 | 39 |
XiDian-ChenMiao/python-master
| 7,567,732,416,904 |
6d4ed9d80f9921f000524233e7ad814968d6b567
|
9da6fdee6122604e4820b1e397c40eec6bfb02e9
|
/simple/config-parser.py
|
6b0df929f77d3c2fc4cd44546f2cc477791b5ebb
|
[] |
no_license
|
https://github.com/XiDian-ChenMiao/python-master
|
54c76d1b0888388972fd9ab4ad3e780c786fe779
|
673993d4d197138e89c2952d2be64b95463b19e9
|
refs/heads/master
| 2021-01-01T15:37:23.283938 | 2020-07-26T04:01:54 | 2020-07-26T04:01:54 | 97,656,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding:utf-8
# Version: 0.1
# Author: DAQINZHIDI
# License: Copyright(c) 2016 Miao.Chen
# Summary: Test Python's ConfigParser parsing module
import configparser
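
# Assumed layout of config.ini (illustrative; the real file may differ):
# [sec_a]
# a_key1 = some-string
# a_key2 = 42
# [sec_b]
# b_key1 = value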
cp = configparser.ConfigParser()
cp.read('config.ini', encoding='utf-8')
sections = cp.sections()
print('Sections:', sections)
opts = cp.options('sec_a')
print('Options in sec_a:', opts)
kvs = cp.items('sec_a')
print('Items in sec_a:', kvs)
str_val = cp.get('sec_a', 'a_key1')
int_val = cp.getint('sec_a', 'a_key2')
print('Value for sec_a[a_key1]', str_val)
print('Value for sec_a[a_key2]', int_val)
cp.set("sec_b", "b_key3", "new-$r")
cp.set("sec_b", "b_newkey", "new-value")
cp.add_section('a_new_section')
cp.set('a_new_section', 'new_key', 'new_value')
cp.write(open("config.conf", "w"))
|
UTF-8
|
Python
| false | false | 761 |
py
| 121 |
config-parser.py
| 121 | 0.666667 | 0.649123 | 0 | 25 | 28.64 | 47 |
js-tutul/Jsblog
| 369,367,212,494 |
98ca2a5015a86ae0e452014de6646746f096ef2e
|
b0fdc04fab517802ca3277d19099c61211a609f5
|
/accounts/views.py
|
a00e91e9591d13fd50c753959484749aa846d742
|
[] |
no_license
|
https://github.com/js-tutul/Jsblog
|
38aff00d9be652a9f83e30ff3058acaf5a04dbed
|
da001fd7eac1a60e1785669f96cf2dbf73212b33
|
refs/heads/master
| 2022-12-12T16:24:38.466319 | 2020-09-12T17:49:14 | 2020-09-12T17:49:14 | 275,648,059 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tempfile
from django.utils.html import strip_tags
import requests
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.core import files
from django.core.mail import EmailMessage
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from accounts.models import RegularUser,OrganizationUser
from mediablog.models import MediaBlog
# Create your views here.
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from io import BytesIO
from accounts.tokens import account_activation_token
def CreateR(user,city,phone,fb,photo,about):
regular=RegularUser(user_r=user)
regular.city=city
regular.phone=phone
regular.about_you=about
regular.photo=photo
regular.fb=fb
regular.save()
def CreateO(user,city,phone,web,address,purpose,photo,org_name):
organization = OrganizationUser(user_o=user)
organization.city=city
organization.phone=phone
organization.fb=web
organization.address=address
organization.purpose=purpose
organization.photo=photo
organization.o_name=org_name
organization.save()
def SIgnupView(request):
return render(request,'user/signup.html')
def LoginView(request):
if request.user.is_authenticated:
return redirect('Mainhome')
else:
if request.method == 'POST':
username=request.POST['username']
password=request.POST['password']
print(username,password)
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('Mainhome')
else:
messages.info(request,"Enter correct username and password")
return redirect('login')
else:
return render(request, 'user/login.html')
def Logout_view(request):
logout(request)
return redirect('login')
def RegularView(request):
if request.method=="POST":
username=request.POST['username']
first_name=request.POST['first_name']
last_name=request.POST['last_name']
email=request.POST['email']
password=request.POST['password']
comfirm_password=request.POST['comfirm_password']
facebook=request.POST['facebook']
city=request.POST['city']
phone=request.POST['phone']
about_you=request.POST['about_you']
photo=request.FILES['photo']
print(username,email,first_name,last_name,password,comfirm_password,city,facebook,phone,about_you,photo)
if password==comfirm_password:
if len(password)<8:
messages.info(request, "password must be 8 character and strong")
return redirect("signup")
elif User.objects.filter(username=username).exists():
messages.info(request, "user taken already")
return redirect("signup")
elif User.objects.filter(email=email).exists():
messages.info(request, "email already taken")
return redirect("signup")
else:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
email=email,
password=password)
user.save()
user.refresh_from_db()
CreateR(get_object_or_404(User,username=username),city,phone,facebook,photo,about_you)
current_site = get_current_site(request)
mail_subject = 'Activate your account.'
message = render_to_string('user/acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
to_email = email
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return HttpResponse('<h1>Please confirm your email address to complete the registration and back to login</h1>')
else:
messages.info(request,"Password not matching")
return redirect('signup')
else:
return redirect('signup')
def OrganizationView(request):
if request.method=="POST":
email=request.POST['org_email']
password=request.POST['password']
org_name=request.POST['org_name']
comfirm_password=request.POST['comfirm_password']
website=request.POST['website']
city=request.POST['org_city']
phone=request.POST['org_phone']
address=request.POST['org_address']
purpose = request.POST['purpose']
photo=request.FILES['org_photo']
if password==comfirm_password:
if len(password)<8:
messages.info(request, "password must be 8 character and strong")
return redirect("signup")
elif User.objects.filter(email=email).exists():
messages.info(request, "email already taken")
return redirect("signup")
else:
user = User.objects.create_user(username=email,email=email,password=password)
user.save()
user.refresh_from_db()
CreateO(get_object_or_404(User,username=email),city,phone,website,address,purpose,photo,org_name)
current_site = get_current_site(request)
mail_subject = 'Activate your account.'
message = render_to_string('user/acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
to_email = email
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return HttpResponse('<h1>Please confirm your email address to complete the registration and back to login</h1>')
else:
messages.info(request,"Password not matching")
return redirect('signup')
else:
return redirect('signup')
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
# return redirect('home')
return HttpResponse('<h2>Thank you for your email confirmation. Now you can login your account.</h2>')
else:
return HttpResponse('Activation link is invalid!')
def Dashboard(request):
if request.user.is_authenticated:
return render(request,'admin/index.html')
else:
return redirect('Mainhome')
def CreateMedia(user,title,link,des):
    video_id = link.split('v=')[1]
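    # Assumes a standard watch URL (e.g. https://www.youtube.com/watch?v=VIDEO_ID); everything
    # after 'v=' is treated as the video id, which also feeds the thumbnail URL below.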
thumbnail_url = f"http://img.youtube.com/vi/{video_id}/sddefault.jpg"
request = requests.get(thumbnail_url,stream=True)
lf = tempfile.NamedTemporaryFile()
for block in request.iter_content(1024*8):
if not block:
break
lf.write(block)
media = MediaBlog(author=user)
media.title=title
media.link=link
media.description=des
media.thumbnail.save("thumbnail.jpg",files.File(lf))
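    # FieldFile.save() persists the MediaBlog instance too (save=True by default),
    # so no explicit media.save() call is needed here.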
def PostMedia(request):
if request.method=="POST":
post_title = request.POST['title']
link = request.POST['link']
description = request.POST['editor']
print(post_title,link,description)
CreateMedia(get_object_or_404(User,username=request.user),post_title,link,description)
return redirect('postmedia')
else:
return render(request,'user/postmedia.html')
|
UTF-8
|
Python
| false | false | 8,435 |
py
| 30 |
views.py
| 14 | 0.620747 | 0.615768 | 0 | 215 | 38.237209 | 128 |
sangyu1996/PythonByte
| 12,704,513,303,922 |
9b9e1315cf373cde21ad08c85d9540ea9d564ade
|
2128bc02edfda53ee1d9a85521e6afc5ac2fde74
|
/advanced_train/multi_threaded_and_multi_process/version_1.py
|
7b038f540bbe420526833c74e24596b8146add44
|
[] |
no_license
|
https://github.com/sangyu1996/PythonByte
|
9853d8b16c5040cee54451d04a36e97b6e4f3053
|
3c4024198f120954f0275e1a6236c90292897fff
|
refs/heads/master
| 2020-03-23T09:31:01.982063 | 2019-03-11T03:35:17 | 2019-03-11T03:35:17 | 141,392,484 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
# version 1: download a .csv file and convert it to .xml format
import tushare as ts
import csv
from xml.etree.ElementTree import Element, ElementTree
def download(sid, cfname):
df = ts.get_hist_data(sid)
df.to_csv(cfname)
def csvToXml(cfname):
    with open(cfname, 'r', newline='') as cf:
        reader = csv.reader(cf)
        headers = next(reader)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag, text in zip(headers, row):
e = Element(tag)
e.text = text
eRow.append(e)
return ElementTree(root)
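# Quick sanity check (a sketch, not called anywhere; 'demo.csv' is a
# hypothetical scratch file name):
def _demo_csv_to_xml():
    with open('demo.csv', 'w') as f:
        f.write('date,open,close\n2019-03-11,10.1,10.4\n')
    tree = csvToXml('demo.csv')
    # The resulting tree is shaped like:
    # <Data><Row><date>2019-03-11</date><open>10.1</open><close>10.4</close></Row></Data>
    return tree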
if __name__ == '__main__':
sid = '000875'
cfname = sid + '.csv'
xfname = sid + '.xml'
download(sid, cfname)
et = csvToXml(cfname)
et.write(xfname)
|
UTF-8
|
Python
| false | false | 838 |
py
| 96 |
version_1.py
| 88 | 0.560345 | 0.550493 | 0 | 38 | 20.368421 | 54 |
fcherqui/Mind4stormwater
| 16,956,530,899,058 |
91e577d85fc0ff0c3b769a465fe2eb6667994b80
|
5e7dbdb33c25682bef058847fd8fd7ac6afbfd9c
|
/Pressure-sensor/config.py
|
c1d5359bed1b6f384b1507956e8216466e7346bd
|
[] |
no_license
|
https://github.com/fcherqui/Mind4stormwater
|
0bac3ac11e3550ee4d6223e7c9db75ac4f73d72a
|
ad5ff727c1591657a0db3f5cc3aa3daa91cb2674
|
refs/heads/master
| 2022-11-01T12:14:04.444174 | 2022-10-31T14:33:52 | 2022-10-31T14:33:52 | 240,187,448 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
########################
# CONFIG FILE !!!!
########################
# PARAMETERS
loraok= False # Send data through Lora ?
sigfoxok=True # Send data through Sigfox ?
needsleep= True # Enable deep sleep
powersaving= True # Optimize the code for power saving
nsensors=2 #number of probes, min=1, max=3
withRTC=False #True if there is a RTC clock
sdmandatory=False #True if there is a SD card to save the data
batcoeff=2.08 # Realvoltage=batcoeff * measurevoltage (was 1.98)
sleeptime=2 #sleep time in minutes, default=10mn (must be an integer)
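# For reference (a sketch, not used in this file): on Pycom/MicroPython boards
# the sleep time above is typically converted to milliseconds at the call
# site, e.g. machine.deepsleep(sleeptime * 60 * 1000).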
# SIGFOX CONFIGURATION
# ==> device needs to be registered on SIGFOX !!!
# cf. sigfoxinit.py
# LORAWAN create an ABP authentication params to secure data
TTNregion="EU868" # "AU915" or "AS923" or "Special" (Special for the mono frequency choosen below)
# YOU NEED TO CREATE AN APP AND A DEVICE (ABP) ON TTN FIRST:
#for the ads1115multiproberj ttn app:
dev_addr='26011E9C'
nwk_swkey='7BB07A03D70812905BCC1C092FA84278'
app_swkey='284C2EDC9FCD7F53C134AAC329D0BD3B'
frequency = 868100000 #use for the "Special" Lora
sf = 7 #spreading factor (integer between 7 and 12)
dr = 5 #Data Rate
# PINs configuration
relay = 'P19' #P19 connected to the nano timer relay done pin
sclPin='P22' # RTC SCL PIN (or OLED SCL PIN)
sdaPin='P23' # RTC SDA PIN (or OLED SDA PIN)
# led colors
OFF = 0x000000
WHITE=0xFFFFCC
YELLOW = 0x7f7f00
ORANGE = 0xFF9900
RED = 0xff0000
PURPLE = 0x7f007f
GREEN = 0x00ff00
BLUE = 0x0000ff
|
UTF-8
|
Python
| false | false | 1,491 |
py
| 49 |
config.py
| 18 | 0.705567 | 0.619048 | 0 | 44 | 31.886364 | 98 |
Harshit-Poddar90/hacktoberfest2K
| 9,861,244,955,368 |
ea255d0e7684852258b98492dc6261ac42079c61
|
9d460da8d8d81a7b79abeef2b06f729a49cfe268
|
/2k20/scripts2020/vokey-nick.py
|
52c5c1640b9a71cef9c0fa90eb32d54388a3e591
|
[] |
no_license
|
https://github.com/Harshit-Poddar90/hacktoberfest2K
|
fc9ab524af2325ad278dff3c368aadae53fcec81
|
c67ba3c3b7150b20d5ae0fff42a19323f7f0289f
|
refs/heads/master
| 2023-08-14T12:03:41.894852 | 2021-10-01T19:34:57 | 2021-10-01T19:34:57 | 412,990,115 | 1 | 0 | null | true | 2021-10-03T05:58:44 | 2021-10-03T05:58:43 | 2021-10-01T19:35:00 | 2021-10-02T15:51:36 | 105 | 0 | 0 | 0 | null | false | false |
def hacktoberfest():
a = "hack"
b = "tober"
c = "fest"
return (a+b+c)
print(hacktoberfest())
|
UTF-8
|
Python
| false | false | 110 |
py
| 24 |
vokey-nick.py
| 21 | 0.536364 | 0.536364 | 0 | 7 | 14.714286 | 22 |
zmanaa/Lane-Detection
| 10,548,439,722,851 |
dd6470764ff0460ee361149b6bacd1100a9214c0
|
c4cec2815d27a7974564173cbc19db518250739f
|
/Utils/constants.py
|
29d7356ccc59f7f8619f214ed75b0273ffb1b237
|
[] |
no_license
|
https://github.com/zmanaa/Lane-Detection
|
b28dd2ef8027a901a157367091ed10f62196f012
|
f4362463490cbd9499abcdeaa5d63a1d807a72ed
|
refs/heads/main
| 2023-08-10T19:13:15.611726 | 2021-10-06T21:08:15 | 2021-10-06T21:08:15 | 413,600,365 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
PIXELS = 2
PERC = np.pi/180
THRESHOLD = 100
LINEPIX = 40
LINEGAPS = 5
LINEWIDTH = 10
|
UTF-8
|
Python
| false | false | 128 |
py
| 5 |
constants.py
| 4 | 0.578125 | 0.484375 | 0 | 8 | 15 | 23 |
anthony-aylward/wasp_map
| 1,778,116,465,113 |
b3baeefa275bfe6bd2da25e9b2a35a08e16d8492
|
528ca20a0b137b2ad82a0217e52e9c68ac9d27ff
|
/wasp_map/download.py
|
558714f85003786ffe5a29c5fdb00e26937584eb
|
[
"MIT"
] |
permissive
|
https://github.com/anthony-aylward/wasp_map
|
6de4df8a26d8903beb7cdec1c6accca5162b3470
|
eadf96c44bb81e416c43ce2e33b7fbb164772cce
|
refs/heads/master
| 2021-06-23T07:44:26.617311 | 2021-02-26T10:04:02 | 2021-02-26T10:04:02 | 194,727,772 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#===============================================================================
# download.py
#===============================================================================
# Imports ======================================================================
import os
import os.path
import subprocess
from argparse import ArgumentParser
from git import Git
from hashlib import sha256
from shutil import copyfileobj
from tempfile import TemporaryDirectory
from urllib.request import urlopen
from wasp_map.env import ANACONDA_DIR, DIR
# Constants ====================================================================
ANACONDA_URL = os.environ.get(
'WASP_MAP_ANACONDA_URL',
'https://repo.anaconda.com/archive/Anaconda3-2019.03-Linux-x86_64.sh'
)
ANACONDA_HASH = os.environ.get(
'WASP_MAP_ANACONDA_HASH',
'45c851b7497cc14d5ca060064394569f724b67d9b5f98a926ed49b834a6bb73a'
)
CONDA_PATH = os.path.join(ANACONDA_DIR, 'bin', 'conda')
WASP_GITHUB_REPO = 'https://github.com/bmvdgeijn/WASP.git'
# Functions ====================================================================
def download_anaconda_install_script(anaconda_install_script_path):
print(
'Downloading Anaconda3 install script to '
f'{anaconda_install_script_path}'
)
with urlopen(ANACONDA_URL) as (
response
), open(anaconda_install_script_path, 'wb') as (
f
):
copyfileobj(response, f)
def check_hash(anaconda_install_script_path):
print(f'checking hash of {anaconda_install_script_path}')
with open(anaconda_install_script_path, 'rb') as f:
if sha256(f.read()).hexdigest() != ANACONDA_HASH:
raise RuntimeError(f'hash check failed for {ANACONDA_URL}')
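# For reference, the expected digest can be reproduced outside Python with,
# e.g., `sha256sum Anaconda3-2019.03-Linux-x86_64.sh` on the downloaded file
# and compared against ANACONDA_HASH.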
def install_anaconda(anaconda_install_script_path):
input(
'installing Anaconda3. When prompted, specify the following '
f'install location:\n{ANACONDA_DIR}\npress ENTER to '
'continue >>>'
)
subprocess.run(('bash', anaconda_install_script_path))
def configure_anaconda():
print('configuring Anaconda3')
subprocess.run((CONDA_PATH, 'config', '--add', 'channels', 'r'))
subprocess.run((CONDA_PATH, 'config', '--add', 'channels', 'bioconda'))
subprocess.run((CONDA_PATH, 'install', 'pysam'))
def clone_wasp():
print(
'cloning the WASP github repo to '
f"{os.path.join(os.path.dirname(DIR), 'WASP')}"
)
Git(os.path.dirname(DIR)).clone(WASP_GITHUB_REPO)
def parse_arguments():
parser = ArgumentParser(description='download and install WASP')
parser.add_argument(
'--tmp-dir',
metavar='<dir/for/temp/files>',
help='directory to use for temporary files'
)
return parser.parse_args()
def main():
args = parse_arguments()
if os.path.isdir(ANACONDA_DIR):
use_existing_anaconda_dir = (
input(
f'There is already a directory at {ANACONDA_DIR} - is this the '
'anaconda you wish to use? ([y]/n) >>>'
).casefold()
in {'', 'y', 'yes'}
)
if not use_existing_anaconda_dir:
print(
'Please change the value of environment variable '
'WASP_MAP_ANACONDA_DIR or remove the existing directory at '
'that location.'
)
return
elif os.path.exists(ANACONDA_DIR):
raise RuntimeError(
f'There is a non-directory file at {ANACONDA_DIR}. Please change '
'the value of environment variable WASP_MAP_ANACONDA_DIR or '
'remove the existing file at that location.'
)
else:
use_existing_anaconda_dir = False
if os.path.isdir(DIR):
use_existing_wasp_dir = (
input(
f'There is already a directory at {DIR} - is this the '
'WASP you wish to use? ([y]/n) >>>'
).casefold() in {'', 'y', 'yes'}
)
if not use_existing_wasp_dir:
print(
'Please change the value of environment variable WASP_MAP_DIR '
'or remove the existing directory at that location.'
)
return
elif os.path.exists(DIR):
raise RuntimeError(
            f'There is a non-directory file at {DIR}. Please change '
'the value of environment variable WASP_MAP_DIR or '
'remove the existing file at that location.'
)
else:
use_existing_wasp_dir = False
if not use_existing_anaconda_dir:
with TemporaryDirectory(dir=args.tmp_dir) as temp_dir:
anaconda_install_script_path = os.path.join(
temp_dir, 'Anaconda3-2019.03-Linux-x86_64.sh'
)
download_anaconda_install_script(anaconda_install_script_path)
check_hash(anaconda_install_script_path)
install_anaconda(anaconda_install_script_path)
configure_anaconda()
if not use_existing_wasp_dir:
clone_wasp()
|
UTF-8
|
Python
| false | false | 4,989 |
py
| 8 |
download.py
| 7 | 0.569854 | 0.554821 | 0 | 151 | 32.039735 | 80 |
sthomen/zc
| 6,717,328,863,504 |
8d3ffd3cb2682161584439a594d8b377367e466a
|
7eb3bd3389b15f90fe4e8eb556ec566ff6db832b
|
/zc/dns/rr/a.py
|
f4898f696b7615458c86bd5195cac2541ab0f815
|
[] |
no_license
|
https://github.com/sthomen/zc
|
d69ce685d0579dd8d0a5e938e04d40a2ca956774
|
7d26a2b4d073fa8dedf15fb5492ff706ab095c92
|
refs/heads/master
| 2023-06-03T22:38:31.047734 | 2020-11-14T20:08:03 | 2020-11-14T20:08:03 | 377,566,555 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from struct import pack, unpack
from .rdata import RData
from ..util import sub
from .rrplugin import RRPlugin
@RRPlugin.register(type=1)
class A(RData):
def decode(self):
self.raw = sub(self.raw, self.offset, 4)
self.address = [ int(v) for v in unpack('!BBBB', self.raw) ]
return self
def encode(self):
self.raw = pack('!BBBB', *self.address)
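# Round-trip sketch (values made up; assumes the surrounding RData machinery
# supplies `raw` and `offset`): the four bytes b'\x7f\x00\x00\x01' decode to
# address [127, 0, 0, 1], and pack('!BBBB', 127, 0, 0, 1) re-encodes the same
# four bytes.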
|
UTF-8
|
Python
| false | false | 351 |
py
| 38 |
a.py
| 36 | 0.698006 | 0.692308 | 0 | 15 | 22.4 | 62 |
hammal/PythonADC
| 11,373,073,447,266 |
0166034cf389efdd527b004bf21b930a73cbf3ab
|
fa72108bf319e00e40b3cd05b3213bdfbb5dcb51
|
/Experiments/s3scripts/download_from_bucket.py
|
596310bdafb4a0627f9ab50a0c37632b855f38e5
|
[] |
no_license
|
https://github.com/hammal/PythonADC
|
b51faad98926afb63711570a107412b43be0e412
|
1f9be8298bf2992196ede7e01d7c3287048bc4a3
|
refs/heads/master
| 2023-03-26T17:01:45.354175 | 2021-03-21T17:09:54 | 2021-03-21T17:09:54 | 335,706,884 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import boto3
import uuid
import argparse
from s3utils import downloadFromS3
BUCKET_NAME = 'paralleladcexperiments5b70cd4e-74d3-4496-96fa-f4025220d48c'
def main(file_name, destination):
s3_resource = boto3.resource('s3')
downloadFromS3(s3_resource, BUCKET_NAME, file_name, destination)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-f', '--file_name', type=str)
arg_parser.add_argument('-d', '--destination', type=str)
args = vars(arg_parser.parse_args())
main(**args)
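# Example invocation (hypothetical file name and destination):
#   python download_from_bucket.py -f experiment_42.pkl -d ./results/experiment_42.pkl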
|
UTF-8
|
Python
| false | false | 551 |
py
| 58 |
download_from_bucket.py
| 52 | 0.696915 | 0.642468 | 0 | 21 | 25.285714 | 74 |
zconn/PythonCert220Assign
| 8,675,833,942,408 |
ac98c4e5e25a8ae058e256bb7efad9398a08de8c
|
0c72282d601ccf840dd4e41b675c0675de7bc916
|
/students/HABTAMU/lesson03/assignment/basic_operations.py
|
0abbf2d973b7ae3e01d0f8a6fcab80841989899a
|
[] |
no_license
|
https://github.com/zconn/PythonCert220Assign
|
c7fedd9ffae4f9e74e5e4dfc59bc6c511c7900ab
|
99271cd60485bd2e54f8d133c9057a2ccd6c91c2
|
refs/heads/master
| 2020-04-15T14:42:08.765699 | 2019-03-14T09:13:36 | 2019-03-14T09:13:36 | 164,763,504 | 2 | 0 | null | true | 2019-01-09T01:34:40 | 2019-01-09T01:34:40 | 2019-01-08T20:21:44 | 2019-01-08T20:21:43 | 788 | 0 | 0 | 0 | null | false | null |
"""
Learning persistence with Peewee and sqlite
delete the database to start over
(but running this program does not require it)
Person:
1. insert records
2. display all records
3. show transactions
4. show error checking
5. show logging (to explain what's going on)
"""
from customers_model import Customer as cls_Customer
import logging
from peewee import SqliteDatabase
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
database = SqliteDatabase('Customer.db')
CUSTOMER_ID = 0
FIRST_NAME = 1
LAST_NAME = 2
HOME_ADDRESS = 3
PHONE_NUMBER = 4
EMAIL_ADDRESS = 5
STATUS = 6
CREDIT_LIMIT = 7
# def add_customer(customer_id, first_name, last_name, home_address, phone_number, email_address, status, credit_limit):
# """
# add customer to database
# :type credit_limit: object
# customer_id, first_name, last_name, home_address, phone_number, email_address, status, credit_limit
# """
#
# customers = [
# #
# # ('123', 'Andrew', 'John','123th 32nd NE,SEA, 98101, WA', '123-131-3331','andrew_john@uw.edi', 'True', '3,320.00'),
# # ('234', 'And', 'Joe', '321th 2nd NE, SEA, 98101, WA', '300-131-3331', 'and@uw.edi', 'True', '2,220.00'),
# # ('345', 'Joe', 'La', '12th 2nd NE, SEA, 98101, WA', '434-131-3331', 'joe@uw.edi', 'True', '20,220.00')
# (customer_id, first_name, last_name, home_address, phone_number, email_address, status, credit_limit)
# ]
#
# logger.info('Creating customer records: iterate through the list of tuples')
# logger.info('Prepare to explain any errors with exceptions')
# logger.info('and the transaction tells the database to rollback on error')
#
# try:
# database.connect()
# database.execute_sql('PRAGMA foreign_keys = ON;')
# for customer in customers:
# with database.transaction():
# new_customer = cls_Customer.create(
# customer_id = customer[CUSTOMER_ID],
# first_name = customer[FIRST_NAME],
# last_name = customer[LAST_NAME],
# home_address = customer[HOME_ADDRESS],
# phone_number = customer[PHONE_NUMBER],
# email_address = customer[EMAIL_ADDRESS],
# status = customer[STATUS],
# credit_limit = customer[CREDIT_LIMIT]
# )
# new_customer.save()
# logger.info('Database add successful')
#
# logger.info('Print the Customer records we just added ...')
# for added_customer in cls_Customer:
# logger.info(f'customer whose id {added_customer.customer_id} known to be with his first name ' +\
# f'{added_customer.first_name} lives in {added_customer.home_address} just added')
# except Exception as e:
# logger.info(f'Error creating = {customer[FIRST_NAME]}')
# logger.info(e)
# logger.info('see how the database protects our data')
#
# finally:
# logger.info('database closes')
# database.close()
# def search_customer(customer_id):
# """
# This function will return a dictionary object with name, lastname,
# email address and phone number of a customer
# or an empty dictionary object if no customer was found.
# :param customer_id:
# :return: dictionary object with name, lastname, email address and phone number of a customer
# """
#
# try:
# database.connect()
# database.execute_sql('PRAGMA foreign_keys = ON;')
#
# a_customer = cls_Customer.get(cls_Customer.customer_id == '234')
# with database.transaction():
# for customer in cls_Customer.select().where(cls_Customer.customer_id == '234'):
# logger.info(f' Here is the search item returned with dict below \n ' +\
# f'\n FIRST_NAME: {a_customer.first_name} ' +\
# f'\n LAST_NAME: {a_customer.last_name}, ' +\
# f'\n EMAIL_ADDRESS: {a_customer.email_address} ' +\
# f'\n PHONE_NUMBER: {a_customer.phone_number}')
# except Exception as e:
# logger.info(f'Error finding = {customer[FIRST_NAME]}')
# logger.info(e)
# logger.info('see how the database protects our data')
#
# finally:
# logger.info('database closes')
# database.close()
#
# def delete_customer(customer_id):
# """
# This function will delete a customer from the sqlite3 database.
# :param customer_id:
# """
#
# try:
# with database.transaction():
# a_customer = cls_Customer.get(cls_Customer.customer_id == '123')
# logger.info(f'Trying to delete as customer with customer id {a_customer.customer_id} and his first name was {a_customer.first_name}')
# a_customer.delete_instance()
#
# except Exception as e:
# logger.info('Delete failed because Andrew has Jobs')
# logger.info(f'Delete failed: {a_customer.first_name}')
# logger.info(e)
# finally:
# logger.info('database closes')
# database.close()
#
# def update_customer_credit(customer_id, credit_limit):
# """
# This function will search an existing customer by customer_id and update their credit limit or
# raise a ValueError exception if the customer does not exist.
#
# :param customer_id:
# :param credit_limit:
# :return:
# """
# try:
# database.connect()
# database.execute_sql('PRAGMA foreign_keys = ON;')
# # a_customer = cls_Customer.get(cls_Customer.customer_id == '345')
# a_customer = cls_Customer.get(cls_Customer.customer_id == customer_id)
# with database.transaction():
# # for customer in cls_Customer.select().where(cls_Customer.customer_id == '345'):
# for customer in cls_Customer.select().where(cls_Customer.customer_id == a_customer):
# logger.info(f' customer with id {customer.customer_id} have a credit limit: {customer.credit_limit}')
# # if customer.customer_id == '345':
# logger.info('Update the credit limit here')
# customer.credit_limit = credit_limit
# customer.save()
# # else:
# # logger.info(f'Not giving a credit limit to {customer.customer_id}')
#
# logger.info(f'And here is where we prove it by finding current credit limit as {a_customer.credit_limit }')
# # a_customer = cls_Customer.get(a_customer.customer_id == '345')
# logger.info(f'{customer.customer_id} now has a credit limit of {customer.credit_limit}')
#
# except Exception as e:
# logger.info('update failed because ....')
# logger.info(f'update failed: {a_customer.credit_limit}')
# logger.info(e)
# finally:
# logger.info('database closes')
# database.close()
def list_active_customers():
    """
    :return: This function will return an integer with the number of customers whose status is currently active.
    """
    count = 0
    try:
        database.connect()
        database.execute_sql('PRAGMA foreign_keys = ON;')
        with database.transaction():
            for customer in cls_Customer.select().where(cls_Customer.status == 1):
                logger.info(f'customer whose first name is {customer.first_name} is currently active')
                count += 1
    except Exception as e:
        logger.info('there could be no active customers')
        logger.info(e)
    finally:
        logger.info('database closes')
        database.close()
    return count
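# Note: a sketch of the equivalent aggregate query in peewee (not wired into
# the function above): cls_Customer.select().where(cls_Customer.status == 1).count()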
if __name__ == '__main__':
# add_customer('567', 'Simon', 'Derike', '3rd 2nd NE,SEA, 98111, WA', '765-131-3331', 'Simon@uw.edi', False, 5320.00)
# search_customer(234)
# delete_customer(123)
# update_customer_credit(345, 7213.00)
list_active_customers()
|
UTF-8
|
Python
| false | false | 8,044 |
py
| 375 |
basic_operations.py
| 326 | 0.594107 | 0.573968 | 0 | 198 | 39.616162 | 147 |
bmeurer/emscripten
| 111,669,193,284 |
a51ce333cce958a5c8d37b745f201843bf15a3e5
|
b2766f8cf250120b341a6150303d97033139663c
|
/site/source/api_items.py
|
01766673931b8f08715961656bbfe15f2d2e719e
|
[
"NCSA",
"MIT"
] |
permissive
|
https://github.com/bmeurer/emscripten
|
dbca4d0f1d0db1ea30b1eece7a4a4b9eec473f68
|
59a416cb3f72d746678fc44a4d2626eb6be98151
|
refs/heads/master
| 2023-02-15T03:31:17.464894 | 2020-07-09T17:40:24 | 2020-07-09T17:40:24 | 265,643,199 | 1 | 0 |
NOASSERTION
| true | 2020-05-20T17:47:05 | 2020-05-20T17:47:04 | 2020-05-20T17:01:14 | 2020-05-20T17:34:26 | 208,961 | 0 | 0 | 0 | null | false | false |
# Auto-generated file (see get_api_items.py)
def get_mapped_items():
mapped_wiki_inline_code = dict()
mapped_wiki_inline_code['*emscripten_get_preloaded_image_data'] = ':c:func:`*emscripten_get_preloaded_image_data`'
mapped_wiki_inline_code['*emscripten_get_preloaded_image_data()'] = ':c:func:`*emscripten_get_preloaded_image_data`'
mapped_wiki_inline_code['*emscripten_get_preloaded_image_data_from_FILE'] = ':c:func:`*emscripten_get_preloaded_image_data_from_FILE`'
mapped_wiki_inline_code['*emscripten_get_preloaded_image_data_from_FILE()'] = ':c:func:`*emscripten_get_preloaded_image_data_from_FILE`'
mapped_wiki_inline_code['*emscripten_run_script_string'] = ':c:func:`*emscripten_run_script_string`'
mapped_wiki_inline_code['*emscripten_run_script_string()'] = ':c:func:`*emscripten_run_script_string`'
mapped_wiki_inline_code[':'] = ':cpp:class:`:`'
mapped_wiki_inline_code['AsciiToString'] = ':js:func:`AsciiToString`'
mapped_wiki_inline_code['AsciiToString()'] = ':js:func:`AsciiToString`'
mapped_wiki_inline_code['DOM_DELTA_LINE'] = ':c:macro:`DOM_DELTA_LINE`'
mapped_wiki_inline_code['DOM_DELTA_PAGE'] = ':c:macro:`DOM_DELTA_PAGE`'
mapped_wiki_inline_code['DOM_DELTA_PIXEL'] = ':c:macro:`DOM_DELTA_PIXEL`'
mapped_wiki_inline_code['DOM_KEY_LOCATION'] = ':c:macro:`DOM_KEY_LOCATION`'
mapped_wiki_inline_code['EMSCRIPTEN_BINDINGS'] = ':cpp:func:`EMSCRIPTEN_BINDINGS`'
mapped_wiki_inline_code['EMSCRIPTEN_BINDINGS()'] = ':cpp:func:`EMSCRIPTEN_BINDINGS`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_BATTERYCHARGINGCHANGE'] = ':c:macro:`EMSCRIPTEN_EVENT_BATTERYCHARGINGCHANGE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_BEFOREUNLOAD'] = ':c:macro:`EMSCRIPTEN_EVENT_BEFOREUNLOAD`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_BLUR'] = ':c:macro:`EMSCRIPTEN_EVENT_BLUR`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_CLICK'] = ':c:macro:`EMSCRIPTEN_EVENT_CLICK`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_DEVICEMOTION'] = ':c:macro:`EMSCRIPTEN_EVENT_DEVICEMOTION`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_DEVICEORIENTATION'] = ':c:macro:`EMSCRIPTEN_EVENT_DEVICEORIENTATION`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_FULLSCREENCHANGE'] = ':c:macro:`EMSCRIPTEN_EVENT_FULLSCREENCHANGE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_GAMEPADCONNECTED'] = ':c:macro:`EMSCRIPTEN_EVENT_GAMEPADCONNECTED`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_KEYPRESS'] = ':c:macro:`EMSCRIPTEN_EVENT_KEYPRESS`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_ORIENTATIONCHANGE'] = ':c:macro:`EMSCRIPTEN_EVENT_ORIENTATIONCHANGE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_POINTERLOCKCHANGE'] = ':c:macro:`EMSCRIPTEN_EVENT_POINTERLOCKCHANGE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_POINTERLOCKERROR'] = ':c:macro:`EMSCRIPTEN_EVENT_POINTERLOCKERROR`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_RESIZE'] = ':c:macro:`EMSCRIPTEN_EVENT_RESIZE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_TOUCHSTART'] = ':c:macro:`EMSCRIPTEN_EVENT_TOUCHSTART`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_VISIBILITYCHANGE'] = ':c:macro:`EMSCRIPTEN_EVENT_VISIBILITYCHANGE`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_WEBGLCONTEXTLOST'] = ':c:macro:`EMSCRIPTEN_EVENT_WEBGLCONTEXTLOST`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_WHEEL'] = ':c:macro:`EMSCRIPTEN_EVENT_WHEEL`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_HIDEF'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_HIDEF`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_NONE'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_NONE`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_STDDEF'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_CANVAS_SCALE_STDDEF`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_FILTERING'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_FILTERING`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_FILTERING_BILINEAR'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_FILTERING_BILINEAR`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_FILTERING_DEFAULT'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_FILTERING_DEFAULT`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_FILTERING_NEAREST'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_FILTERING_NEAREST`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_SCALE'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_SCALE`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_SCALE_ASPECT'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_SCALE_ASPECT`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_SCALE_DEFAULT'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_SCALE_DEFAULT`'
mapped_wiki_inline_code['EMSCRIPTEN_FULLSCREEN_SCALE_STRETCH'] = ':c:macro:`EMSCRIPTEN_FULLSCREEN_SCALE_STRETCH`'
mapped_wiki_inline_code['EMSCRIPTEN_KEEPALIVE'] = ':c:macro:`EMSCRIPTEN_KEEPALIVE`'
mapped_wiki_inline_code['EMSCRIPTEN_ORIENTATION_LANDSCAPE_PRIMARY'] = ':c:macro:`EMSCRIPTEN_ORIENTATION_LANDSCAPE_PRIMARY`'
mapped_wiki_inline_code['EMSCRIPTEN_ORIENTATION_LANDSCAPE_SECONDARY'] = ':c:macro:`EMSCRIPTEN_ORIENTATION_LANDSCAPE_SECONDARY`'
mapped_wiki_inline_code['EMSCRIPTEN_ORIENTATION_PORTRAIT_PRIMARY'] = ':c:macro:`EMSCRIPTEN_ORIENTATION_PORTRAIT_PRIMARY`'
mapped_wiki_inline_code['EMSCRIPTEN_ORIENTATION_PORTRAIT_SECONDARY'] = ':c:macro:`EMSCRIPTEN_ORIENTATION_PORTRAIT_SECONDARY`'
mapped_wiki_inline_code['EMSCRIPTEN_RESULT'] = ':c:macro:`EMSCRIPTEN_RESULT`'
mapped_wiki_inline_code['EMSCRIPTEN_VISIBILITY_HIDDEN'] = ':c:macro:`EMSCRIPTEN_VISIBILITY_HIDDEN`'
mapped_wiki_inline_code['EMSCRIPTEN_VISIBILITY_PRERENDER'] = ':c:macro:`EMSCRIPTEN_VISIBILITY_PRERENDER`'
mapped_wiki_inline_code['EMSCRIPTEN_VISIBILITY_UNLOADED'] = ':c:macro:`EMSCRIPTEN_VISIBILITY_UNLOADED`'
mapped_wiki_inline_code['EMSCRIPTEN_VISIBILITY_VISIBLE'] = ':c:macro:`EMSCRIPTEN_VISIBILITY_VISIBLE`'
mapped_wiki_inline_code['EMSCRIPTEN_WEBGL_CONTEXT_HANDLE'] = ':c:type:`EMSCRIPTEN_WEBGL_CONTEXT_HANDLE`'
mapped_wiki_inline_code['EMSCRIPTEN_WRAPPER'] = ':cpp:func:`EMSCRIPTEN_WRAPPER`'
mapped_wiki_inline_code['EMSCRIPTEN_WRAPPER()'] = ':cpp:func:`EMSCRIPTEN_WRAPPER`'
mapped_wiki_inline_code['EM_ASM'] = ':c:macro:`EM_ASM`'
mapped_wiki_inline_code['EM_ASM_INT'] = ':c:macro:`EM_ASM_INT`'
mapped_wiki_inline_code['EM_BOOL'] = ':c:macro:`EM_BOOL`'
mapped_wiki_inline_code['EM_JS'] = ':c:macro:`EM_JS`'
mapped_wiki_inline_code['EM_LOG_CONSOLE'] = ':c:macro:`EM_LOG_CONSOLE`'
mapped_wiki_inline_code['EM_LOG_C_STACK'] = ':c:macro:`EM_LOG_C_STACK`'
mapped_wiki_inline_code['EM_LOG_DEMANGLE'] = ':c:macro:`EM_LOG_DEMANGLE`'
mapped_wiki_inline_code['EM_LOG_ERROR'] = ':c:macro:`EM_LOG_ERROR`'
mapped_wiki_inline_code['EM_LOG_FUNC_PARAMS'] = ':c:macro:`EM_LOG_FUNC_PARAMS`'
mapped_wiki_inline_code['EM_LOG_JS_STACK'] = ':c:macro:`EM_LOG_JS_STACK`'
mapped_wiki_inline_code['EM_LOG_NO_PATHS'] = ':c:macro:`EM_LOG_NO_PATHS`'
mapped_wiki_inline_code['EM_LOG_WARN'] = ':c:macro:`EM_LOG_WARN`'
mapped_wiki_inline_code['EM_LOG_INFO'] = ':c:macro:`EM_LOG_INFO`'
mapped_wiki_inline_code['EM_LOG_DEBUG'] = ':c:macro:`EM_LOG_DEBUG`'
mapped_wiki_inline_code['EM_UTF8'] = ':c:macro:`EM_UTF8`'
mapped_wiki_inline_code['EmscriptenBatteryEvent'] = ':c:type:`EmscriptenBatteryEvent`'
mapped_wiki_inline_code['EmscriptenDeviceMotionEvent'] = ':c:type:`EmscriptenDeviceMotionEvent`'
mapped_wiki_inline_code['EmscriptenDeviceOrientationEvent'] = ':c:type:`EmscriptenDeviceOrientationEvent`'
mapped_wiki_inline_code['EmscriptenFocusEvent'] = ':c:type:`EmscriptenFocusEvent`'
mapped_wiki_inline_code['EmscriptenFullscreenChangeEvent'] = ':c:type:`EmscriptenFullscreenChangeEvent`'
mapped_wiki_inline_code['EmscriptenFullscreenStrategy'] = ':c:type:`EmscriptenFullscreenStrategy`'
mapped_wiki_inline_code['EmscriptenGamepadEvent'] = ':c:type:`EmscriptenGamepadEvent`'
mapped_wiki_inline_code['EmscriptenKeyboardEvent'] = ':c:type:`EmscriptenKeyboardEvent`'
mapped_wiki_inline_code['EmscriptenMouseEvent'] = ':c:type:`EmscriptenMouseEvent`'
mapped_wiki_inline_code['EmscriptenOrientationChangeEvent'] = ':c:type:`EmscriptenOrientationChangeEvent`'
mapped_wiki_inline_code['EmscriptenPointerlockChangeEvent'] = ':c:type:`EmscriptenPointerlockChangeEvent`'
mapped_wiki_inline_code['EmscriptenTouchEvent'] = ':c:type:`EmscriptenTouchEvent`'
mapped_wiki_inline_code['EmscriptenTouchPoint'] = ':c:type:`EmscriptenTouchPoint`'
mapped_wiki_inline_code['EmscriptenUiEvent'] = ':c:type:`EmscriptenUiEvent`'
mapped_wiki_inline_code['EmscriptenVisibilityChangeEvent'] = ':c:type:`EmscriptenVisibilityChangeEvent`'
mapped_wiki_inline_code['EmscriptenWebGLContextAttributes'] = ':c:type:`EmscriptenWebGLContextAttributes`'
mapped_wiki_inline_code['EmscriptenWheelEvent'] = ':c:type:`EmscriptenWheelEvent`'
mapped_wiki_inline_code['FS.chmod'] = ':js:func:`FS.chmod`'
mapped_wiki_inline_code['FS.chmod()'] = ':js:func:`FS.chmod`'
mapped_wiki_inline_code['FS.chown'] = ':js:func:`FS.chown`'
mapped_wiki_inline_code['FS.chown()'] = ':js:func:`FS.chown`'
mapped_wiki_inline_code['FS.close'] = ':js:func:`FS.close`'
mapped_wiki_inline_code['FS.close()'] = ':js:func:`FS.close`'
mapped_wiki_inline_code['FS.createLazyFile'] = ':js:func:`FS.createLazyFile`'
mapped_wiki_inline_code['FS.createLazyFile()'] = ':js:func:`FS.createLazyFile`'
mapped_wiki_inline_code['FS.createPreloadedFile'] = ':js:func:`FS.createPreloadedFile`'
mapped_wiki_inline_code['FS.createPreloadedFile()'] = ':js:func:`FS.createPreloadedFile`'
mapped_wiki_inline_code['FS.cwd'] = ':js:func:`FS.cwd`'
mapped_wiki_inline_code['FS.cwd()'] = ':js:func:`FS.cwd`'
mapped_wiki_inline_code['FS.fchmod'] = ':js:func:`FS.fchmod`'
mapped_wiki_inline_code['FS.fchmod()'] = ':js:func:`FS.fchmod`'
mapped_wiki_inline_code['FS.fchown'] = ':js:func:`FS.fchown`'
mapped_wiki_inline_code['FS.fchown()'] = ':js:func:`FS.fchown`'
mapped_wiki_inline_code['FS.ftruncate'] = ':js:func:`FS.ftruncate`'
mapped_wiki_inline_code['FS.ftruncate()'] = ':js:func:`FS.ftruncate`'
mapped_wiki_inline_code['FS.getMode'] = ':js:func:`FS.getMode`'
mapped_wiki_inline_code['FS.getMode()'] = ':js:func:`FS.getMode`'
mapped_wiki_inline_code['FS.getPath'] = ':js:func:`FS.getPath`'
mapped_wiki_inline_code['FS.getPath()'] = ':js:func:`FS.getPath`'
mapped_wiki_inline_code['FS.handleFSError'] = ':js:func:`FS.handleFSError`'
mapped_wiki_inline_code['FS.handleFSError()'] = ':js:func:`FS.handleFSError`'
mapped_wiki_inline_code['FS.init'] = ':js:func:`FS.init`'
mapped_wiki_inline_code['FS.init()'] = ':js:func:`FS.init`'
mapped_wiki_inline_code['FS.isBlkdev'] = ':js:func:`FS.isBlkdev`'
mapped_wiki_inline_code['FS.isBlkdev()'] = ':js:func:`FS.isBlkdev`'
mapped_wiki_inline_code['FS.isChrdev'] = ':js:func:`FS.isChrdev`'
mapped_wiki_inline_code['FS.isChrdev()'] = ':js:func:`FS.isChrdev`'
mapped_wiki_inline_code['FS.isDir'] = ':js:func:`FS.isDir`'
mapped_wiki_inline_code['FS.isDir()'] = ':js:func:`FS.isDir`'
mapped_wiki_inline_code['FS.isFile'] = ':js:func:`FS.isFile`'
mapped_wiki_inline_code['FS.isFile()'] = ':js:func:`FS.isFile`'
mapped_wiki_inline_code['FS.isLink'] = ':js:func:`FS.isLink`'
mapped_wiki_inline_code['FS.isLink()'] = ':js:func:`FS.isLink`'
mapped_wiki_inline_code['FS.isSocket'] = ':js:func:`FS.isSocket`'
mapped_wiki_inline_code['FS.isSocket()'] = ':js:func:`FS.isSocket`'
mapped_wiki_inline_code['FS.lchmod'] = ':js:func:`FS.lchmod`'
mapped_wiki_inline_code['FS.lchmod()'] = ':js:func:`FS.lchmod`'
mapped_wiki_inline_code['FS.lchown'] = ':js:func:`FS.lchown`'
mapped_wiki_inline_code['FS.lchown()'] = ':js:func:`FS.lchown`'
mapped_wiki_inline_code['FS.llseek'] = ':js:func:`FS.llseek`'
mapped_wiki_inline_code['FS.llseek()'] = ':js:func:`FS.llseek`'
mapped_wiki_inline_code['FS.lookupPath'] = ':js:func:`FS.lookupPath`'
mapped_wiki_inline_code['FS.lookupPath()'] = ':js:func:`FS.lookupPath`'
mapped_wiki_inline_code['FS.lstat'] = ':js:func:`FS.lstat`'
mapped_wiki_inline_code['FS.lstat()'] = ':js:func:`FS.lstat`'
mapped_wiki_inline_code['FS.makedev'] = ':js:func:`FS.makedev`'
mapped_wiki_inline_code['FS.makedev()'] = ':js:func:`FS.makedev`'
mapped_wiki_inline_code['FS.mkdev'] = ':js:func:`FS.mkdev`'
mapped_wiki_inline_code['FS.mkdev()'] = ':js:func:`FS.mkdev`'
mapped_wiki_inline_code['FS.mkdir'] = ':js:func:`FS.mkdir`'
mapped_wiki_inline_code['FS.mkdir()'] = ':js:func:`FS.mkdir`'
mapped_wiki_inline_code['FS.mount'] = ':js:func:`FS.mount`'
mapped_wiki_inline_code['FS.mount()'] = ':js:func:`FS.mount`'
mapped_wiki_inline_code['FS.open'] = ':js:func:`FS.open`'
mapped_wiki_inline_code['FS.open()'] = ':js:func:`FS.open`'
mapped_wiki_inline_code['FS.read'] = ':js:func:`FS.read`'
mapped_wiki_inline_code['FS.read()'] = ':js:func:`FS.read`'
mapped_wiki_inline_code['FS.readFile'] = ':js:func:`FS.readFile`'
mapped_wiki_inline_code['FS.readFile()'] = ':js:func:`FS.readFile`'
mapped_wiki_inline_code['FS.readlink'] = ':js:func:`FS.readlink`'
mapped_wiki_inline_code['FS.readlink()'] = ':js:func:`FS.readlink`'
mapped_wiki_inline_code['FS.registerDevice'] = ':js:func:`FS.registerDevice`'
mapped_wiki_inline_code['FS.registerDevice()'] = ':js:func:`FS.registerDevice`'
mapped_wiki_inline_code['FS.rename'] = ':js:func:`FS.rename`'
mapped_wiki_inline_code['FS.rename()'] = ':js:func:`FS.rename`'
mapped_wiki_inline_code['FS.rmdir'] = ':js:func:`FS.rmdir`'
mapped_wiki_inline_code['FS.rmdir()'] = ':js:func:`FS.rmdir`'
mapped_wiki_inline_code['FS.stat'] = ':js:func:`FS.stat`'
mapped_wiki_inline_code['FS.stat()'] = ':js:func:`FS.stat`'
mapped_wiki_inline_code['FS.symlink'] = ':js:func:`FS.symlink`'
mapped_wiki_inline_code['FS.symlink()'] = ':js:func:`FS.symlink`'
mapped_wiki_inline_code['FS.syncfs'] = ':js:func:`FS.syncfs`'
mapped_wiki_inline_code['FS.syncfs()'] = ':js:func:`FS.syncfs`'
mapped_wiki_inline_code['FS.truncate'] = ':js:func:`FS.truncate`'
mapped_wiki_inline_code['FS.truncate()'] = ':js:func:`FS.truncate`'
mapped_wiki_inline_code['FS.unlink'] = ':js:func:`FS.unlink`'
mapped_wiki_inline_code['FS.unlink()'] = ':js:func:`FS.unlink`'
mapped_wiki_inline_code['FS.unmount'] = ':js:func:`FS.unmount`'
mapped_wiki_inline_code['FS.unmount()'] = ':js:func:`FS.unmount`'
mapped_wiki_inline_code['FS.utime'] = ':js:func:`FS.utime`'
mapped_wiki_inline_code['FS.utime()'] = ':js:func:`FS.utime`'
mapped_wiki_inline_code['FS.write'] = ':js:func:`FS.write`'
mapped_wiki_inline_code['FS.write()'] = ':js:func:`FS.write`'
mapped_wiki_inline_code['FS.writeFile'] = ':js:func:`FS.writeFile`'
mapped_wiki_inline_code['FS.writeFile()'] = ':js:func:`FS.writeFile`'
mapped_wiki_inline_code['HEAP16'] = ':js:data:`HEAP16`'
mapped_wiki_inline_code['HEAP32'] = ':js:data:`HEAP32`'
mapped_wiki_inline_code['HEAP8'] = ':js:data:`HEAP8`'
mapped_wiki_inline_code['HEAPF32'] = ':js:data:`HEAPF32`'
mapped_wiki_inline_code['HEAPF64'] = ':js:data:`HEAPF64`'
mapped_wiki_inline_code['HEAPU16'] = ':js:data:`HEAPU16`'
mapped_wiki_inline_code['HEAPU32'] = ':js:data:`HEAPU32`'
mapped_wiki_inline_code['HEAPU8'] = ':js:data:`HEAPU8`'
mapped_wiki_inline_code['Module.arguments'] = ':js:attribute:`Module.arguments`'
mapped_wiki_inline_code['Module.destroy'] = ':js:func:`Module.destroy`'
mapped_wiki_inline_code['Module.destroy()'] = ':js:func:`Module.destroy`'
mapped_wiki_inline_code['Module.getPreloadedPackage'] = ':js:func:`Module.getPreloadedPackage`'
mapped_wiki_inline_code['Module.getPreloadedPackage()'] = ':js:func:`Module.getPreloadedPackage`'
mapped_wiki_inline_code['Module.instantiateWasm'] = ':js:func:`Module.instantiateWasm`'
mapped_wiki_inline_code['Module.instantiateWasm()'] = ':js:func:`Module.instantiateWasm`'
mapped_wiki_inline_code['Module.locateFile'] = ':js:attribute:`Module.locateFile`'
mapped_wiki_inline_code['Module.logReadFiles'] = ':js:attribute:`Module.logReadFiles`'
mapped_wiki_inline_code['Module.noExitRuntime'] = ':js:attribute:`Module.noExitRuntime`'
mapped_wiki_inline_code['Module.noInitialRun'] = ':js:attribute:`Module.noInitialRun`'
mapped_wiki_inline_code['Module.onAbort'] = ':js:attribute:`Module.onAbort`'
mapped_wiki_inline_code['Module.onCustomMessage'] = ':js:func:`Module.onCustomMessage`'
mapped_wiki_inline_code['Module.onCustomMessage()'] = ':js:func:`Module.onCustomMessage`'
mapped_wiki_inline_code['Module.onRuntimeInitialized'] = ':js:attribute:`Module.onRuntimeInitialized`'
mapped_wiki_inline_code['Module.preInit'] = ':js:attribute:`Module.preInit`'
mapped_wiki_inline_code['Module.preRun'] = ':js:attribute:`Module.preRun`'
mapped_wiki_inline_code['Module.preinitializedWebGLContext'] = ':js:attribute:`Module.preinitializedWebGLContext`'
mapped_wiki_inline_code['Module.print'] = ':js:attribute:`Module.print`'
mapped_wiki_inline_code['Module.printErr'] = ':js:attribute:`Module.printErr`'
mapped_wiki_inline_code['PointeeType>'] = ':cpp:type:`PointeeType>`'
mapped_wiki_inline_code['UTF16ToString'] = ':js:func:`UTF16ToString`'
mapped_wiki_inline_code['UTF16ToString()'] = ':js:func:`UTF16ToString`'
mapped_wiki_inline_code['UTF32ToString'] = ':js:func:`UTF32ToString`'
mapped_wiki_inline_code['UTF32ToString()'] = ':js:func:`UTF32ToString`'
mapped_wiki_inline_code['UTF8ToString'] = ':js:func:`UTF8ToString`'
mapped_wiki_inline_code['UTF8ToString()'] = ':js:func:`UTF8ToString`'
mapped_wiki_inline_code['V>>'] = ':cpp:func:`V>>`'
mapped_wiki_inline_code['V>>()'] = ':cpp:func:`V>>`'
mapped_wiki_inline_code['VRDisplayCapabilities'] = ':c:type:`VRDisplayCapabilities`'
mapped_wiki_inline_code['VREyeParameters'] = ':c:type:`VREyeParameters`'
mapped_wiki_inline_code['VRFrameData'] = ':c:type:`VRFrameData`'
mapped_wiki_inline_code['VRLayerInit'] = ':c:type:`VRLayerInit`'
mapped_wiki_inline_code['VRPose'] = ':c:type:`VRPose`'
mapped_wiki_inline_code['VRQuaternion'] = ':c:type:`VRQuaternion`'
mapped_wiki_inline_code['VRVector3'] = ':c:type:`VRVector3`'
mapped_wiki_inline_code['VR_EYE_LEFT'] = ':c:macro:`VR_EYE_LEFT`'
mapped_wiki_inline_code['VR_LAYER_DEFAULT_LEFT_BOUNDS'] = ':c:macro:`VR_LAYER_DEFAULT_LEFT_BOUNDS`'
mapped_wiki_inline_code['VR_POSE_POSITION'] = ':c:macro:`VR_POSE_POSITION`'
mapped_wiki_inline_code['__getDynamicPointerType'] = ':cpp:func:`__getDynamicPointerType`'
mapped_wiki_inline_code['__getDynamicPointerType()'] = ':cpp:func:`__getDynamicPointerType`'
mapped_wiki_inline_code['addRunDependency'] = ':js:func:`addRunDependency`'
mapped_wiki_inline_code['addRunDependency()'] = ':js:func:`addRunDependency`'
mapped_wiki_inline_code['allocate'] = ':js:func:`allocate`'
mapped_wiki_inline_code['allocate()'] = ':js:func:`allocate`'
mapped_wiki_inline_code['allow_raw_pointer'] = ':cpp:type:`allow_raw_pointer`'
mapped_wiki_inline_code['allow_raw_pointers'] = ':cpp:type:`allow_raw_pointers`'
mapped_wiki_inline_code['arg'] = ':cpp:type:`arg`'
mapped_wiki_inline_code['base'] = ':cpp:type:`base`'
mapped_wiki_inline_code['ccall'] = ':js:func:`ccall`'
mapped_wiki_inline_code['ccall()'] = ':js:func:`ccall`'
mapped_wiki_inline_code['char*'] = ':c:func:`char*`'
mapped_wiki_inline_code['char*()'] = ':c:func:`char*`'
mapped_wiki_inline_code['class_'] = ':cpp:class:`class_`'
mapped_wiki_inline_code['constant'] = ':cpp:func:`constant`'
mapped_wiki_inline_code['constant()'] = ':cpp:func:`constant`'
mapped_wiki_inline_code['constructor'] = ':cpp:type:`constructor`'
mapped_wiki_inline_code['cwrap'] = ':js:func:`cwrap`'
mapped_wiki_inline_code['cwrap()'] = ':js:func:`cwrap`'
mapped_wiki_inline_code['default_smart_ptr_trait'] = ':cpp:type:`default_smart_ptr_trait`'
mapped_wiki_inline_code['em_arg_callback_func'] = ':c:type:`em_arg_callback_func`'
mapped_wiki_inline_code['em_async_wget2_data_onerror_func'] = ':c:type:`em_async_wget2_data_onerror_func`'
mapped_wiki_inline_code['em_async_wget2_data_onload_func'] = ':c:type:`em_async_wget2_data_onload_func`'
mapped_wiki_inline_code['em_async_wget2_data_onprogress_func'] = ':c:type:`em_async_wget2_data_onprogress_func`'
mapped_wiki_inline_code['em_async_wget2_onload_func'] = ':c:type:`em_async_wget2_onload_func`'
mapped_wiki_inline_code['em_async_wget2_onstatus_func'] = ':c:type:`em_async_wget2_onstatus_func`'
mapped_wiki_inline_code['em_async_wget_onload_func'] = ':c:type:`em_async_wget_onload_func`'
mapped_wiki_inline_code['em_battery_callback_func'] = ':c:type:`em_battery_callback_func`'
mapped_wiki_inline_code['em_beforeunload_callback'] = ':c:type:`em_beforeunload_callback`'
mapped_wiki_inline_code['em_callback_func'] = ':c:type:`em_callback_func`'
mapped_wiki_inline_code['em_devicemotion_callback_func'] = ':c:type:`em_devicemotion_callback_func`'
mapped_wiki_inline_code['em_deviceorientation_callback_func'] = ':c:type:`em_deviceorientation_callback_func`'
mapped_wiki_inline_code['em_focus_callback_func'] = ':c:type:`em_focus_callback_func`'
mapped_wiki_inline_code['em_fullscreenchange_callback_func'] = ':c:type:`em_fullscreenchange_callback_func`'
mapped_wiki_inline_code['em_gamepad_callback_func'] = ':c:type:`em_gamepad_callback_func`'
mapped_wiki_inline_code['em_key_callback_func'] = ':c:type:`em_key_callback_func`'
mapped_wiki_inline_code['em_mouse_callback_func'] = ':c:type:`em_mouse_callback_func`'
mapped_wiki_inline_code['em_orientationchange_callback_func'] = ':c:type:`em_orientationchange_callback_func`'
mapped_wiki_inline_code['em_pointerlockchange_callback_func'] = ':c:type:`em_pointerlockchange_callback_func`'
mapped_wiki_inline_code['em_pointerlockerror_callback_func'] = ':c:type:`em_pointerlockerror_callback_func`'
mapped_wiki_inline_code['em_run_preload_plugins_data_onload_func'] = ':c:type:`em_run_preload_plugins_data_onload_func`'
mapped_wiki_inline_code['em_socket_callback'] = ':c:type:`em_socket_callback`'
mapped_wiki_inline_code['em_socket_error_callback'] = ':c:type:`em_socket_error_callback`'
mapped_wiki_inline_code['em_str_callback_func'] = ':c:type:`em_str_callback_func`'
mapped_wiki_inline_code['em_touch_callback_func'] = ':c:type:`em_touch_callback_func`'
mapped_wiki_inline_code['em_ui_callback_func'] = ':c:type:`em_ui_callback_func`'
mapped_wiki_inline_code['em_visibilitychange_callback_func'] = ':c:type:`em_visibilitychange_callback_func`'
mapped_wiki_inline_code['em_webgl_context_callback'] = ':c:type:`em_webgl_context_callback`'
mapped_wiki_inline_code['em_wheel_callback_func'] = ':c:type:`em_wheel_callback_func`'
mapped_wiki_inline_code['em_worker_callback_func'] = ':c:type:`em_worker_callback_func`'
mapped_wiki_inline_code['emscripten'] = ':cpp:namespace:`emscripten`'
mapped_wiki_inline_code['emscripten::val'] = ':cpp:class:`emscripten::val`'
mapped_wiki_inline_code['emscripten_align1_short'] = ':c:type:`emscripten_align1_short`'
mapped_wiki_inline_code['emscripten_async_call'] = ':c:func:`emscripten_async_call`'
mapped_wiki_inline_code['emscripten_async_call()'] = ':c:func:`emscripten_async_call`'
mapped_wiki_inline_code['emscripten_async_load_script'] = ':c:func:`emscripten_async_load_script`'
mapped_wiki_inline_code['emscripten_async_load_script()'] = ':c:func:`emscripten_async_load_script`'
mapped_wiki_inline_code['emscripten_async_run_script'] = ':c:func:`emscripten_async_run_script`'
mapped_wiki_inline_code['emscripten_async_run_script()'] = ':c:func:`emscripten_async_run_script`'
mapped_wiki_inline_code['emscripten_async_wget'] = ':c:func:`emscripten_async_wget`'
mapped_wiki_inline_code['emscripten_async_wget()'] = ':c:func:`emscripten_async_wget`'
mapped_wiki_inline_code['emscripten_async_wget2'] = ':c:func:`emscripten_async_wget2`'
mapped_wiki_inline_code['emscripten_async_wget2()'] = ':c:func:`emscripten_async_wget2`'
mapped_wiki_inline_code['emscripten_async_wget2_abort'] = ':c:func:`emscripten_async_wget2_abort`'
mapped_wiki_inline_code['emscripten_async_wget2_abort()'] = ':c:func:`emscripten_async_wget2_abort`'
mapped_wiki_inline_code['emscripten_async_wget2_data'] = ':c:func:`emscripten_async_wget2_data`'
mapped_wiki_inline_code['emscripten_async_wget2_data()'] = ':c:func:`emscripten_async_wget2_data`'
mapped_wiki_inline_code['emscripten_async_wget_data'] = ':c:func:`emscripten_async_wget_data`'
mapped_wiki_inline_code['emscripten_async_wget_data()'] = ':c:func:`emscripten_async_wget_data`'
mapped_wiki_inline_code['emscripten_call_worker'] = ':c:func:`emscripten_call_worker`'
mapped_wiki_inline_code['emscripten_call_worker()'] = ':c:func:`emscripten_call_worker`'
mapped_wiki_inline_code['emscripten_cancel_animation_frame'] = ':c:func:`emscripten_cancel_animation_frame`'
mapped_wiki_inline_code['emscripten_cancel_animation_frame()'] = ':c:func:`emscripten_cancel_animation_frame`'
mapped_wiki_inline_code['emscripten_cancel_main_loop'] = ':c:func:`emscripten_cancel_main_loop`'
mapped_wiki_inline_code['emscripten_cancel_main_loop()'] = ':c:func:`emscripten_cancel_main_loop`'
mapped_wiki_inline_code['emscripten_clear_immediate'] = ':c:func:`emscripten_clear_immediate`'
mapped_wiki_inline_code['emscripten_clear_immediate()'] = ':c:func:`emscripten_clear_immediate`'
mapped_wiki_inline_code['emscripten_clear_interval'] = ':c:func:`emscripten_clear_interval`'
mapped_wiki_inline_code['emscripten_clear_interval()'] = ':c:func:`emscripten_clear_interval`'
mapped_wiki_inline_code['emscripten_clear_timeout'] = ':c:func:`emscripten_clear_timeout`'
mapped_wiki_inline_code['emscripten_clear_timeout()'] = ':c:func:`emscripten_clear_timeout`'
mapped_wiki_inline_code['emscripten_console_error'] = ':c:func:`emscripten_console_error`'
mapped_wiki_inline_code['emscripten_console_error()'] = ':c:func:`emscripten_console_error`'
mapped_wiki_inline_code['emscripten_console_log'] = ':c:func:`emscripten_console_log`'
mapped_wiki_inline_code['emscripten_console_log()'] = ':c:func:`emscripten_console_log`'
mapped_wiki_inline_code['emscripten_console_warn'] = ':c:func:`emscripten_console_warn`'
mapped_wiki_inline_code['emscripten_console_warn()'] = ':c:func:`emscripten_console_warn`'
mapped_wiki_inline_code['emscripten_coroutine'] = ':c:type:`emscripten_coroutine`'
mapped_wiki_inline_code['emscripten_coroutine_create'] = ':c:func:`emscripten_coroutine_create`'
mapped_wiki_inline_code['emscripten_coroutine_create()'] = ':c:func:`emscripten_coroutine_create`'
mapped_wiki_inline_code['emscripten_coroutine_next'] = ':c:func:`emscripten_coroutine_next`'
mapped_wiki_inline_code['emscripten_coroutine_next()'] = ':c:func:`emscripten_coroutine_next`'
mapped_wiki_inline_code['emscripten_create_worker'] = ':c:func:`emscripten_create_worker`'
mapped_wiki_inline_code['emscripten_create_worker()'] = ':c:func:`emscripten_create_worker`'
mapped_wiki_inline_code['emscripten_date_now'] = ':c:func:`emscripten_date_now`'
mapped_wiki_inline_code['emscripten_date_now()'] = ':c:func:`emscripten_date_now`'
mapped_wiki_inline_code['emscripten_debugger'] = ':c:func:`emscripten_debugger`'
mapped_wiki_inline_code['emscripten_debugger()'] = ':c:func:`emscripten_debugger`'
mapped_wiki_inline_code['emscripten_destroy_worker'] = ':c:func:`emscripten_destroy_worker`'
mapped_wiki_inline_code['emscripten_destroy_worker()'] = ':c:func:`emscripten_destroy_worker`'
mapped_wiki_inline_code['emscripten_enter_soft_fullscreen'] = ':c:func:`emscripten_enter_soft_fullscreen`'
mapped_wiki_inline_code['emscripten_enter_soft_fullscreen()'] = ':c:func:`emscripten_enter_soft_fullscreen`'
mapped_wiki_inline_code['emscripten_exit_fullscreen'] = ':c:func:`emscripten_exit_fullscreen`'
mapped_wiki_inline_code['emscripten_exit_fullscreen()'] = ':c:func:`emscripten_exit_fullscreen`'
mapped_wiki_inline_code['emscripten_exit_pointerlock'] = ':c:func:`emscripten_exit_pointerlock`'
mapped_wiki_inline_code['emscripten_exit_pointerlock()'] = ':c:func:`emscripten_exit_pointerlock`'
mapped_wiki_inline_code['emscripten_exit_soft_fullscreen'] = ':c:func:`emscripten_exit_soft_fullscreen`'
mapped_wiki_inline_code['emscripten_exit_soft_fullscreen()'] = ':c:func:`emscripten_exit_soft_fullscreen`'
mapped_wiki_inline_code['emscripten_exit_with_live_runtime'] = ':c:func:`emscripten_exit_with_live_runtime`'
mapped_wiki_inline_code['emscripten_exit_with_live_runtime()'] = ':c:func:`emscripten_exit_with_live_runtime`'
mapped_wiki_inline_code['emscripten_force_exit'] = ':c:func:`emscripten_force_exit`'
mapped_wiki_inline_code['emscripten_force_exit()'] = ':c:func:`emscripten_force_exit`'
mapped_wiki_inline_code['emscripten_get_battery_status'] = ':c:func:`emscripten_get_battery_status`'
mapped_wiki_inline_code['emscripten_get_battery_status()'] = ':c:func:`emscripten_get_battery_status`'
mapped_wiki_inline_code['emscripten_get_callstack'] = ':c:func:`emscripten_get_callstack`'
mapped_wiki_inline_code['emscripten_get_callstack()'] = ':c:func:`emscripten_get_callstack`'
mapped_wiki_inline_code['emscripten_get_canvas_element_size'] = ':c:func:`emscripten_get_canvas_element_size`'
mapped_wiki_inline_code['emscripten_get_canvas_element_size()'] = ':c:func:`emscripten_get_canvas_element_size`'
mapped_wiki_inline_code['emscripten_get_compiler_setting'] = ':c:func:`emscripten_get_compiler_setting`'
mapped_wiki_inline_code['emscripten_get_compiler_setting()'] = ':c:func:`emscripten_get_compiler_setting`'
mapped_wiki_inline_code['emscripten_get_device_pixel_ratio'] = ':c:func:`emscripten_get_device_pixel_ratio`'
mapped_wiki_inline_code['emscripten_get_device_pixel_ratio()'] = ':c:func:`emscripten_get_device_pixel_ratio`'
mapped_wiki_inline_code['emscripten_get_devicemotion_status'] = ':c:func:`emscripten_get_devicemotion_status`'
mapped_wiki_inline_code['emscripten_get_devicemotion_status()'] = ':c:func:`emscripten_get_devicemotion_status`'
mapped_wiki_inline_code['emscripten_get_deviceorientation_status'] = ':c:func:`emscripten_get_deviceorientation_status`'
mapped_wiki_inline_code['emscripten_get_deviceorientation_status()'] = ':c:func:`emscripten_get_deviceorientation_status`'
mapped_wiki_inline_code['emscripten_get_element_css_size'] = ':c:func:`emscripten_get_element_css_size`'
mapped_wiki_inline_code['emscripten_get_element_css_size()'] = ':c:func:`emscripten_get_element_css_size`'
mapped_wiki_inline_code['emscripten_get_fullscreen_status'] = ':c:func:`emscripten_get_fullscreen_status`'
mapped_wiki_inline_code['emscripten_get_fullscreen_status()'] = ':c:func:`emscripten_get_fullscreen_status`'
mapped_wiki_inline_code['emscripten_get_gamepad_status'] = ':c:func:`emscripten_get_gamepad_status`'
mapped_wiki_inline_code['emscripten_get_gamepad_status()'] = ':c:func:`emscripten_get_gamepad_status`'
mapped_wiki_inline_code['emscripten_get_main_loop_timing'] = ':c:func:`emscripten_get_main_loop_timing`'
mapped_wiki_inline_code['emscripten_get_main_loop_timing()'] = ':c:func:`emscripten_get_main_loop_timing`'
mapped_wiki_inline_code['emscripten_get_mouse_status'] = ':c:func:`emscripten_get_mouse_status`'
mapped_wiki_inline_code['emscripten_get_mouse_status()'] = ':c:func:`emscripten_get_mouse_status`'
mapped_wiki_inline_code['emscripten_get_now'] = ':c:func:`emscripten_get_now`'
mapped_wiki_inline_code['emscripten_get_now()'] = ':c:func:`emscripten_get_now`'
mapped_wiki_inline_code['emscripten_get_num_gamepads'] = ':c:func:`emscripten_get_num_gamepads`'
mapped_wiki_inline_code['emscripten_get_num_gamepads()'] = ':c:func:`emscripten_get_num_gamepads`'
mapped_wiki_inline_code['emscripten_get_orientation_status'] = ':c:func:`emscripten_get_orientation_status`'
mapped_wiki_inline_code['emscripten_get_orientation_status()'] = ':c:func:`emscripten_get_orientation_status`'
mapped_wiki_inline_code['emscripten_get_pointerlock_status'] = ':c:func:`emscripten_get_pointerlock_status`'
mapped_wiki_inline_code['emscripten_get_pointerlock_status()'] = ':c:func:`emscripten_get_pointerlock_status`'
mapped_wiki_inline_code['emscripten_get_visibility_status'] = ':c:func:`emscripten_get_visibility_status`'
mapped_wiki_inline_code['emscripten_get_visibility_status()'] = ':c:func:`emscripten_get_visibility_status`'
mapped_wiki_inline_code['emscripten_get_worker_queue_size'] = ':c:func:`emscripten_get_worker_queue_size`'
mapped_wiki_inline_code['emscripten_get_worker_queue_size()'] = ':c:func:`emscripten_get_worker_queue_size`'
mapped_wiki_inline_code['emscripten_hide_mouse'] = ':c:func:`emscripten_hide_mouse`'
mapped_wiki_inline_code['emscripten_hide_mouse()'] = ':c:func:`emscripten_hide_mouse`'
mapped_wiki_inline_code['emscripten_idb_async_delete'] = ':c:func:`emscripten_idb_async_delete`'
mapped_wiki_inline_code['emscripten_idb_async_delete()'] = ':c:func:`emscripten_idb_async_delete`'
mapped_wiki_inline_code['emscripten_idb_async_exists'] = ':c:func:`emscripten_idb_async_exists`'
mapped_wiki_inline_code['emscripten_idb_async_exists()'] = ':c:func:`emscripten_idb_async_exists`'
mapped_wiki_inline_code['emscripten_idb_async_load'] = ':c:func:`emscripten_idb_async_load`'
mapped_wiki_inline_code['emscripten_idb_async_load()'] = ':c:func:`emscripten_idb_async_load`'
mapped_wiki_inline_code['emscripten_idb_async_store'] = ':c:func:`emscripten_idb_async_store`'
mapped_wiki_inline_code['emscripten_idb_async_store()'] = ':c:func:`emscripten_idb_async_store`'
mapped_wiki_inline_code['emscripten_idb_delete'] = ':c:func:`emscripten_idb_delete`'
mapped_wiki_inline_code['emscripten_idb_delete()'] = ':c:func:`emscripten_idb_delete`'
mapped_wiki_inline_code['emscripten_idb_exists'] = ':c:func:`emscripten_idb_exists`'
mapped_wiki_inline_code['emscripten_idb_exists()'] = ':c:func:`emscripten_idb_exists`'
mapped_wiki_inline_code['emscripten_idb_load'] = ':c:func:`emscripten_idb_load`'
mapped_wiki_inline_code['emscripten_idb_load()'] = ':c:func:`emscripten_idb_load`'
mapped_wiki_inline_code['emscripten_idb_store'] = ':c:func:`emscripten_idb_store`'
mapped_wiki_inline_code['emscripten_idb_store()'] = ':c:func:`emscripten_idb_store`'
mapped_wiki_inline_code['emscripten_is_webgl_context_lost'] = ':c:func:`emscripten_is_webgl_context_lost`'
mapped_wiki_inline_code['emscripten_is_webgl_context_lost()'] = ':c:func:`emscripten_is_webgl_context_lost`'
mapped_wiki_inline_code['emscripten_lock_orientation'] = ':c:func:`emscripten_lock_orientation`'
mapped_wiki_inline_code['emscripten_lock_orientation()'] = ':c:func:`emscripten_lock_orientation`'
mapped_wiki_inline_code['emscripten_log'] = ':c:func:`emscripten_log`'
mapped_wiki_inline_code['emscripten_log()'] = ':c:func:`emscripten_log`'
mapped_wiki_inline_code['emscripten_pause_main_loop'] = ':c:func:`emscripten_pause_main_loop`'
mapped_wiki_inline_code['emscripten_pause_main_loop()'] = ':c:func:`emscripten_pause_main_loop`'
mapped_wiki_inline_code['emscripten_performance_now'] = ':c:func:`emscripten_performance_now`'
mapped_wiki_inline_code['emscripten_performance_now()'] = ':c:func:`emscripten_performance_now`'
mapped_wiki_inline_code['emscripten_print_double'] = ':c:func:`emscripten_print_double`'
mapped_wiki_inline_code['emscripten_print_double()'] = ':c:func:`emscripten_print_double`'
mapped_wiki_inline_code['emscripten_push_main_loop_blocker'] = ':c:func:`emscripten_push_main_loop_blocker`'
mapped_wiki_inline_code['emscripten_push_main_loop_blocker()'] = ':c:func:`emscripten_push_main_loop_blocker`'
mapped_wiki_inline_code['emscripten_random'] = ':c:func:`emscripten_random`'
mapped_wiki_inline_code['emscripten_random()'] = ':c:func:`emscripten_random`'
mapped_wiki_inline_code['emscripten_request_animation_frame'] = ':c:func:`emscripten_request_animation_frame`'
mapped_wiki_inline_code['emscripten_request_animation_frame()'] = ':c:func:`emscripten_request_animation_frame`'
mapped_wiki_inline_code['emscripten_request_animation_frame_loop'] = ':c:func:`emscripten_request_animation_frame_loop`'
mapped_wiki_inline_code['emscripten_request_animation_frame_loop()'] = ':c:func:`emscripten_request_animation_frame_loop`'
mapped_wiki_inline_code['emscripten_request_fullscreen'] = ':c:func:`emscripten_request_fullscreen`'
mapped_wiki_inline_code['emscripten_request_fullscreen()'] = ':c:func:`emscripten_request_fullscreen`'
mapped_wiki_inline_code['emscripten_request_fullscreen_strategy'] = ':c:func:`emscripten_request_fullscreen_strategy`'
mapped_wiki_inline_code['emscripten_request_fullscreen_strategy()'] = ':c:func:`emscripten_request_fullscreen_strategy`'
mapped_wiki_inline_code['emscripten_request_pointerlock'] = ':c:func:`emscripten_request_pointerlock`'
mapped_wiki_inline_code['emscripten_request_pointerlock()'] = ':c:func:`emscripten_request_pointerlock`'
mapped_wiki_inline_code['emscripten_run_preload_plugins'] = ':c:func:`emscripten_run_preload_plugins`'
mapped_wiki_inline_code['emscripten_run_preload_plugins()'] = ':c:func:`emscripten_run_preload_plugins`'
mapped_wiki_inline_code['emscripten_run_preload_plugins_data'] = ':c:func:`emscripten_run_preload_plugins_data`'
mapped_wiki_inline_code['emscripten_run_preload_plugins_data()'] = ':c:func:`emscripten_run_preload_plugins_data`'
mapped_wiki_inline_code['emscripten_run_script'] = ':c:func:`emscripten_run_script`'
mapped_wiki_inline_code['emscripten_run_script()'] = ':c:func:`emscripten_run_script`'
mapped_wiki_inline_code['emscripten_run_script_int'] = ':c:func:`emscripten_run_script_int`'
mapped_wiki_inline_code['emscripten_run_script_int()'] = ':c:func:`emscripten_run_script_int`'
mapped_wiki_inline_code['emscripten_sample_gamepad_data'] = ':c:func:`emscripten_sample_gamepad_data`'
mapped_wiki_inline_code['emscripten_sample_gamepad_data()'] = ':c:func:`emscripten_sample_gamepad_data`'
mapped_wiki_inline_code['emscripten_set_batterychargingchange_callback'] = ':c:func:`emscripten_set_batterychargingchange_callback`'
mapped_wiki_inline_code['emscripten_set_batterychargingchange_callback()'] = ':c:func:`emscripten_set_batterychargingchange_callback`'
mapped_wiki_inline_code['emscripten_set_beforeunload_callback'] = ':c:func:`emscripten_set_beforeunload_callback`'
mapped_wiki_inline_code['emscripten_set_beforeunload_callback()'] = ':c:func:`emscripten_set_beforeunload_callback`'
mapped_wiki_inline_code['emscripten_set_blur_callback'] = ':c:func:`emscripten_set_blur_callback`'
mapped_wiki_inline_code['emscripten_set_blur_callback()'] = ':c:func:`emscripten_set_blur_callback`'
mapped_wiki_inline_code['emscripten_set_canvas_element_size'] = ':c:func:`emscripten_set_canvas_element_size`'
mapped_wiki_inline_code['emscripten_set_canvas_element_size()'] = ':c:func:`emscripten_set_canvas_element_size`'
mapped_wiki_inline_code['emscripten_set_click_callback'] = ':c:func:`emscripten_set_click_callback`'
mapped_wiki_inline_code['emscripten_set_click_callback()'] = ':c:func:`emscripten_set_click_callback`'
mapped_wiki_inline_code['emscripten_set_devicemotion_callback'] = ':c:func:`emscripten_set_devicemotion_callback`'
mapped_wiki_inline_code['emscripten_set_devicemotion_callback()'] = ':c:func:`emscripten_set_devicemotion_callback`'
mapped_wiki_inline_code['emscripten_set_deviceorientation_callback'] = ':c:func:`emscripten_set_deviceorientation_callback`'
mapped_wiki_inline_code['emscripten_set_deviceorientation_callback()'] = ':c:func:`emscripten_set_deviceorientation_callback`'
mapped_wiki_inline_code['emscripten_set_element_css_size'] = ':c:func:`emscripten_set_element_css_size`'
mapped_wiki_inline_code['emscripten_set_element_css_size()'] = ':c:func:`emscripten_set_element_css_size`'
mapped_wiki_inline_code['emscripten_set_fullscreenchange_callback'] = ':c:func:`emscripten_set_fullscreenchange_callback`'
mapped_wiki_inline_code['emscripten_set_fullscreenchange_callback()'] = ':c:func:`emscripten_set_fullscreenchange_callback`'
mapped_wiki_inline_code['emscripten_set_gamepadconnected_callback'] = ':c:func:`emscripten_set_gamepadconnected_callback`'
mapped_wiki_inline_code['emscripten_set_gamepadconnected_callback()'] = ':c:func:`emscripten_set_gamepadconnected_callback`'
mapped_wiki_inline_code['emscripten_set_immediate'] = ':c:func:`emscripten_set_immediate`'
mapped_wiki_inline_code['emscripten_set_immediate()'] = ':c:func:`emscripten_set_immediate`'
mapped_wiki_inline_code['emscripten_set_immediate_loop'] = ':c:func:`emscripten_set_immediate_loop`'
mapped_wiki_inline_code['emscripten_set_immediate_loop()'] = ':c:func:`emscripten_set_immediate_loop`'
mapped_wiki_inline_code['emscripten_set_interval'] = ':c:func:`emscripten_set_interval`'
mapped_wiki_inline_code['emscripten_set_interval()'] = ':c:func:`emscripten_set_interval`'
mapped_wiki_inline_code['emscripten_set_keypress_callback'] = ':c:func:`emscripten_set_keypress_callback`'
mapped_wiki_inline_code['emscripten_set_keypress_callback()'] = ':c:func:`emscripten_set_keypress_callback`'
mapped_wiki_inline_code['emscripten_set_main_loop'] = ':c:func:`emscripten_set_main_loop`'
mapped_wiki_inline_code['emscripten_set_main_loop()'] = ':c:func:`emscripten_set_main_loop`'
mapped_wiki_inline_code['emscripten_set_main_loop_arg'] = ':c:func:`emscripten_set_main_loop_arg`'
mapped_wiki_inline_code['emscripten_set_main_loop_arg()'] = ':c:func:`emscripten_set_main_loop_arg`'
mapped_wiki_inline_code['emscripten_set_main_loop_expected_blockers'] = ':c:func:`emscripten_set_main_loop_expected_blockers`'
mapped_wiki_inline_code['emscripten_set_main_loop_expected_blockers()'] = ':c:func:`emscripten_set_main_loop_expected_blockers`'
mapped_wiki_inline_code['emscripten_set_main_loop_timing'] = ':c:func:`emscripten_set_main_loop_timing`'
mapped_wiki_inline_code['emscripten_set_main_loop_timing()'] = ':c:func:`emscripten_set_main_loop_timing`'
mapped_wiki_inline_code['emscripten_set_orientationchange_callback'] = ':c:func:`emscripten_set_orientationchange_callback`'
mapped_wiki_inline_code['emscripten_set_orientationchange_callback()'] = ':c:func:`emscripten_set_orientationchange_callback`'
mapped_wiki_inline_code['emscripten_set_pointerlockchange_callback'] = ':c:func:`emscripten_set_pointerlockchange_callback`'
mapped_wiki_inline_code['emscripten_set_pointerlockchange_callback()'] = ':c:func:`emscripten_set_pointerlockchange_callback`'
mapped_wiki_inline_code['emscripten_set_pointerlockerror_callback'] = ':c:func:`emscripten_set_pointerlockerror_callback`'
mapped_wiki_inline_code['emscripten_set_pointerlockerror_callback()'] = ':c:func:`emscripten_set_pointerlockerror_callback`'
mapped_wiki_inline_code['emscripten_set_resize_callback'] = ':c:func:`emscripten_set_resize_callback`'
mapped_wiki_inline_code['emscripten_set_resize_callback()'] = ':c:func:`emscripten_set_resize_callback`'
mapped_wiki_inline_code['emscripten_set_socket_close_callback'] = ':c:func:`emscripten_set_socket_close_callback`'
mapped_wiki_inline_code['emscripten_set_socket_close_callback()'] = ':c:func:`emscripten_set_socket_close_callback`'
mapped_wiki_inline_code['emscripten_set_socket_connection_callback'] = ':c:func:`emscripten_set_socket_connection_callback`'
mapped_wiki_inline_code['emscripten_set_socket_connection_callback()'] = ':c:func:`emscripten_set_socket_connection_callback`'
mapped_wiki_inline_code['emscripten_set_socket_error_callback'] = ':c:func:`emscripten_set_socket_error_callback`'
mapped_wiki_inline_code['emscripten_set_socket_error_callback()'] = ':c:func:`emscripten_set_socket_error_callback`'
mapped_wiki_inline_code['emscripten_set_socket_listen_callback'] = ':c:func:`emscripten_set_socket_listen_callback`'
mapped_wiki_inline_code['emscripten_set_socket_listen_callback()'] = ':c:func:`emscripten_set_socket_listen_callback`'
mapped_wiki_inline_code['emscripten_set_socket_message_callback'] = ':c:func:`emscripten_set_socket_message_callback`'
mapped_wiki_inline_code['emscripten_set_socket_message_callback()'] = ':c:func:`emscripten_set_socket_message_callback`'
mapped_wiki_inline_code['emscripten_set_socket_open_callback'] = ':c:func:`emscripten_set_socket_open_callback`'
mapped_wiki_inline_code['emscripten_set_socket_open_callback()'] = ':c:func:`emscripten_set_socket_open_callback`'
mapped_wiki_inline_code['emscripten_set_timeout'] = ':c:func:`emscripten_set_timeout`'
mapped_wiki_inline_code['emscripten_set_timeout()'] = ':c:func:`emscripten_set_timeout`'
mapped_wiki_inline_code['emscripten_set_timeout_loop'] = ':c:func:`emscripten_set_timeout_loop`'
mapped_wiki_inline_code['emscripten_set_timeout_loop()'] = ':c:func:`emscripten_set_timeout_loop`'
mapped_wiki_inline_code['emscripten_set_touchstart_callback'] = ':c:func:`emscripten_set_touchstart_callback`'
mapped_wiki_inline_code['emscripten_set_touchstart_callback()'] = ':c:func:`emscripten_set_touchstart_callback`'
mapped_wiki_inline_code['emscripten_set_visibilitychange_callback'] = ':c:func:`emscripten_set_visibilitychange_callback`'
mapped_wiki_inline_code['emscripten_set_visibilitychange_callback()'] = ':c:func:`emscripten_set_visibilitychange_callback`'
mapped_wiki_inline_code['emscripten_set_webglcontextlost_callback'] = ':c:func:`emscripten_set_webglcontextlost_callback`'
mapped_wiki_inline_code['emscripten_set_webglcontextlost_callback()'] = ':c:func:`emscripten_set_webglcontextlost_callback`'
mapped_wiki_inline_code['emscripten_set_wheel_callback'] = ':c:func:`emscripten_set_wheel_callback`'
mapped_wiki_inline_code['emscripten_set_wheel_callback()'] = ':c:func:`emscripten_set_wheel_callback`'
mapped_wiki_inline_code['emscripten_sleep'] = ':c:func:`emscripten_sleep`'
mapped_wiki_inline_code['emscripten_sleep()'] = ':c:func:`emscripten_sleep`'
mapped_wiki_inline_code['emscripten_sleep_with_yield'] = ':c:func:`emscripten_sleep_with_yield`'
mapped_wiki_inline_code['emscripten_sleep_with_yield()'] = ':c:func:`emscripten_sleep_with_yield`'
mapped_wiki_inline_code['emscripten_throw_number'] = ':c:func:`emscripten_throw_number`'
mapped_wiki_inline_code['emscripten_throw_number()'] = ':c:func:`emscripten_throw_number`'
mapped_wiki_inline_code['emscripten_throw_string'] = ':c:func:`emscripten_throw_string`'
mapped_wiki_inline_code['emscripten_throw_string()'] = ':c:func:`emscripten_throw_string`'
mapped_wiki_inline_code['emscripten_trace_annotate_address_type'] = ':c:func:`emscripten_trace_annotate_address_type`'
mapped_wiki_inline_code['emscripten_trace_annotate_address_type()'] = ':c:func:`emscripten_trace_annotate_address_type`'
mapped_wiki_inline_code['emscripten_trace_associate_storage_size'] = ':c:func:`emscripten_trace_associate_storage_size`'
mapped_wiki_inline_code['emscripten_trace_associate_storage_size()'] = ':c:func:`emscripten_trace_associate_storage_size`'
mapped_wiki_inline_code['emscripten_trace_close'] = ':c:func:`emscripten_trace_close`'
mapped_wiki_inline_code['emscripten_trace_close()'] = ':c:func:`emscripten_trace_close`'
mapped_wiki_inline_code['emscripten_trace_configure'] = ':c:func:`emscripten_trace_configure`'
mapped_wiki_inline_code['emscripten_trace_configure()'] = ':c:func:`emscripten_trace_configure`'
mapped_wiki_inline_code['emscripten_trace_configure_for_google_wtf'] = ':c:func:`emscripten_trace_configure_for_google_wtf`'
mapped_wiki_inline_code['emscripten_trace_configure_for_google_wtf()'] = ':c:func:`emscripten_trace_configure_for_google_wtf`'
mapped_wiki_inline_code['emscripten_trace_enter_context'] = ':c:func:`emscripten_trace_enter_context`'
mapped_wiki_inline_code['emscripten_trace_enter_context()'] = ':c:func:`emscripten_trace_enter_context`'
mapped_wiki_inline_code['emscripten_trace_exit_context'] = ':c:func:`emscripten_trace_exit_context`'
mapped_wiki_inline_code['emscripten_trace_exit_context()'] = ':c:func:`emscripten_trace_exit_context`'
mapped_wiki_inline_code['emscripten_trace_log_message'] = ':c:func:`emscripten_trace_log_message`'
mapped_wiki_inline_code['emscripten_trace_log_message()'] = ':c:func:`emscripten_trace_log_message`'
mapped_wiki_inline_code['emscripten_trace_mark'] = ':c:func:`emscripten_trace_mark`'
mapped_wiki_inline_code['emscripten_trace_mark()'] = ':c:func:`emscripten_trace_mark`'
mapped_wiki_inline_code['emscripten_trace_record_allocation'] = ':c:func:`emscripten_trace_record_allocation`'
mapped_wiki_inline_code['emscripten_trace_record_allocation()'] = ':c:func:`emscripten_trace_record_allocation`'
mapped_wiki_inline_code['emscripten_trace_record_frame_end'] = ':c:func:`emscripten_trace_record_frame_end`'
mapped_wiki_inline_code['emscripten_trace_record_frame_end()'] = ':c:func:`emscripten_trace_record_frame_end`'
mapped_wiki_inline_code['emscripten_trace_record_frame_start'] = ':c:func:`emscripten_trace_record_frame_start`'
mapped_wiki_inline_code['emscripten_trace_record_frame_start()'] = ':c:func:`emscripten_trace_record_frame_start`'
mapped_wiki_inline_code['emscripten_trace_record_free'] = ':c:func:`emscripten_trace_record_free`'
mapped_wiki_inline_code['emscripten_trace_record_free()'] = ':c:func:`emscripten_trace_record_free`'
mapped_wiki_inline_code['emscripten_trace_record_reallocation'] = ':c:func:`emscripten_trace_record_reallocation`'
mapped_wiki_inline_code['emscripten_trace_record_reallocation()'] = ':c:func:`emscripten_trace_record_reallocation`'
mapped_wiki_inline_code['emscripten_trace_report_error'] = ':c:func:`emscripten_trace_report_error`'
mapped_wiki_inline_code['emscripten_trace_report_error()'] = ':c:func:`emscripten_trace_report_error`'
mapped_wiki_inline_code['emscripten_trace_report_memory_layout'] = ':c:func:`emscripten_trace_report_memory_layout`'
mapped_wiki_inline_code['emscripten_trace_report_memory_layout()'] = ':c:func:`emscripten_trace_report_memory_layout`'
mapped_wiki_inline_code['emscripten_trace_report_off_heap_data'] = ':c:func:`emscripten_trace_report_off_heap_data`'
mapped_wiki_inline_code['emscripten_trace_report_off_heap_data()'] = ':c:func:`emscripten_trace_report_off_heap_data`'
mapped_wiki_inline_code['emscripten_trace_set_enabled'] = ':c:func:`emscripten_trace_set_enabled`'
mapped_wiki_inline_code['emscripten_trace_set_enabled()'] = ':c:func:`emscripten_trace_set_enabled`'
mapped_wiki_inline_code['emscripten_trace_set_session_username'] = ':c:func:`emscripten_trace_set_session_username`'
mapped_wiki_inline_code['emscripten_trace_set_session_username()'] = ':c:func:`emscripten_trace_set_session_username`'
mapped_wiki_inline_code['emscripten_trace_task_associate_data'] = ':c:func:`emscripten_trace_task_associate_data`'
mapped_wiki_inline_code['emscripten_trace_task_associate_data()'] = ':c:func:`emscripten_trace_task_associate_data`'
mapped_wiki_inline_code['emscripten_trace_task_end'] = ':c:func:`emscripten_trace_task_end`'
mapped_wiki_inline_code['emscripten_trace_task_end()'] = ':c:func:`emscripten_trace_task_end`'
mapped_wiki_inline_code['emscripten_trace_task_resume'] = ':c:func:`emscripten_trace_task_resume`'
mapped_wiki_inline_code['emscripten_trace_task_resume()'] = ':c:func:`emscripten_trace_task_resume`'
mapped_wiki_inline_code['emscripten_trace_task_start'] = ':c:func:`emscripten_trace_task_start`'
mapped_wiki_inline_code['emscripten_trace_task_start()'] = ':c:func:`emscripten_trace_task_start`'
mapped_wiki_inline_code['emscripten_trace_task_suspend'] = ':c:func:`emscripten_trace_task_suspend`'
mapped_wiki_inline_code['emscripten_trace_task_suspend()'] = ':c:func:`emscripten_trace_task_suspend`'
mapped_wiki_inline_code['emscripten_unlock_orientation'] = ':c:func:`emscripten_unlock_orientation`'
mapped_wiki_inline_code['emscripten_unlock_orientation()'] = ':c:func:`emscripten_unlock_orientation`'
mapped_wiki_inline_code['emscripten_vibrate'] = ':c:func:`emscripten_vibrate`'
mapped_wiki_inline_code['emscripten_vibrate()'] = ':c:func:`emscripten_vibrate`'
mapped_wiki_inline_code['emscripten_vibrate_pattern'] = ':c:func:`emscripten_vibrate_pattern`'
mapped_wiki_inline_code['emscripten_vibrate_pattern()'] = ':c:func:`emscripten_vibrate_pattern`'
mapped_wiki_inline_code['emscripten_vr_cancel_display_render_loop'] = ':c:func:`emscripten_vr_cancel_display_render_loop`'
mapped_wiki_inline_code['emscripten_vr_cancel_display_render_loop()'] = ':c:func:`emscripten_vr_cancel_display_render_loop`'
mapped_wiki_inline_code['emscripten_vr_count_displays'] = ':c:func:`emscripten_vr_count_displays`'
mapped_wiki_inline_code['emscripten_vr_count_displays()'] = ':c:func:`emscripten_vr_count_displays`'
mapped_wiki_inline_code['emscripten_vr_deinit'] = ':c:func:`emscripten_vr_deinit`'
mapped_wiki_inline_code['emscripten_vr_deinit()'] = ':c:func:`emscripten_vr_deinit`'
mapped_wiki_inline_code['emscripten_vr_display_connected'] = ':c:func:`emscripten_vr_display_connected`'
mapped_wiki_inline_code['emscripten_vr_display_connected()'] = ':c:func:`emscripten_vr_display_connected`'
mapped_wiki_inline_code['emscripten_vr_display_presenting'] = ':c:func:`emscripten_vr_display_presenting`'
mapped_wiki_inline_code['emscripten_vr_display_presenting()'] = ':c:func:`emscripten_vr_display_presenting`'
mapped_wiki_inline_code['emscripten_vr_exit_present'] = ':c:func:`emscripten_vr_exit_present`'
mapped_wiki_inline_code['emscripten_vr_exit_present()'] = ':c:func:`emscripten_vr_exit_present`'
mapped_wiki_inline_code['emscripten_vr_get_display_capabilities'] = ':c:func:`emscripten_vr_get_display_capabilities`'
mapped_wiki_inline_code['emscripten_vr_get_display_capabilities()'] = ':c:func:`emscripten_vr_get_display_capabilities`'
mapped_wiki_inline_code['emscripten_vr_get_display_handle'] = ':c:func:`emscripten_vr_get_display_handle`'
mapped_wiki_inline_code['emscripten_vr_get_display_handle()'] = ':c:func:`emscripten_vr_get_display_handle`'
mapped_wiki_inline_code['emscripten_vr_get_eye_parameters'] = ':c:func:`emscripten_vr_get_eye_parameters`'
mapped_wiki_inline_code['emscripten_vr_get_eye_parameters()'] = ':c:func:`emscripten_vr_get_eye_parameters`'
mapped_wiki_inline_code['emscripten_vr_get_frame_data'] = ':c:func:`emscripten_vr_get_frame_data`'
mapped_wiki_inline_code['emscripten_vr_get_frame_data()'] = ':c:func:`emscripten_vr_get_frame_data`'
mapped_wiki_inline_code['emscripten_vr_init'] = ':c:func:`emscripten_vr_init`'
mapped_wiki_inline_code['emscripten_vr_init()'] = ':c:func:`emscripten_vr_init`'
mapped_wiki_inline_code['emscripten_vr_ready'] = ':c:func:`emscripten_vr_ready`'
mapped_wiki_inline_code['emscripten_vr_ready()'] = ':c:func:`emscripten_vr_ready`'
mapped_wiki_inline_code['emscripten_vr_request_present'] = ':c:func:`emscripten_vr_request_present`'
mapped_wiki_inline_code['emscripten_vr_request_present()'] = ':c:func:`emscripten_vr_request_present`'
mapped_wiki_inline_code['emscripten_vr_set_display_render_loop'] = ':c:func:`emscripten_vr_set_display_render_loop`'
mapped_wiki_inline_code['emscripten_vr_set_display_render_loop()'] = ':c:func:`emscripten_vr_set_display_render_loop`'
mapped_wiki_inline_code['emscripten_vr_set_display_render_loop_arg'] = ':c:func:`emscripten_vr_set_display_render_loop_arg`'
mapped_wiki_inline_code['emscripten_vr_set_display_render_loop_arg()'] = ':c:func:`emscripten_vr_set_display_render_loop_arg`'
mapped_wiki_inline_code['emscripten_vr_submit_frame'] = ':c:func:`emscripten_vr_submit_frame`'
mapped_wiki_inline_code['emscripten_vr_submit_frame()'] = ':c:func:`emscripten_vr_submit_frame`'
mapped_wiki_inline_code['emscripten_vr_version_major'] = ':c:func:`emscripten_vr_version_major`'
mapped_wiki_inline_code['emscripten_vr_version_major()'] = ':c:func:`emscripten_vr_version_major`'
mapped_wiki_inline_code['emscripten_vr_version_minor'] = ':c:func:`emscripten_vr_version_minor`'
mapped_wiki_inline_code['emscripten_vr_version_minor()'] = ':c:func:`emscripten_vr_version_minor`'
mapped_wiki_inline_code['emscripten_webgl_commit_frame'] = ':c:func:`emscripten_webgl_commit_frame`'
mapped_wiki_inline_code['emscripten_webgl_commit_frame()'] = ':c:func:`emscripten_webgl_commit_frame`'
mapped_wiki_inline_code['emscripten_webgl_create_context'] = ':c:func:`emscripten_webgl_create_context`'
mapped_wiki_inline_code['emscripten_webgl_create_context()'] = ':c:func:`emscripten_webgl_create_context`'
mapped_wiki_inline_code['emscripten_webgl_destroy_context'] = ':c:func:`emscripten_webgl_destroy_context`'
mapped_wiki_inline_code['emscripten_webgl_destroy_context()'] = ':c:func:`emscripten_webgl_destroy_context`'
mapped_wiki_inline_code['emscripten_webgl_enable_extension'] = ':c:func:`emscripten_webgl_enable_extension`'
mapped_wiki_inline_code['emscripten_webgl_enable_extension()'] = ':c:func:`emscripten_webgl_enable_extension`'
mapped_wiki_inline_code['emscripten_webgl_get_context_attributes'] = ':c:func:`emscripten_webgl_get_context_attributes`'
mapped_wiki_inline_code['emscripten_webgl_get_context_attributes()'] = ':c:func:`emscripten_webgl_get_context_attributes`'
mapped_wiki_inline_code['emscripten_webgl_get_current_context'] = ':c:func:`emscripten_webgl_get_current_context`'
mapped_wiki_inline_code['emscripten_webgl_get_current_context()'] = ':c:func:`emscripten_webgl_get_current_context`'
mapped_wiki_inline_code['emscripten_webgl_get_drawing_buffer_size'] = ':c:func:`emscripten_webgl_get_drawing_buffer_size`'
mapped_wiki_inline_code['emscripten_webgl_get_drawing_buffer_size()'] = ':c:func:`emscripten_webgl_get_drawing_buffer_size`'
mapped_wiki_inline_code['emscripten_webgl_init_context_attributes'] = ':c:func:`emscripten_webgl_init_context_attributes`'
mapped_wiki_inline_code['emscripten_webgl_init_context_attributes()'] = ':c:func:`emscripten_webgl_init_context_attributes`'
mapped_wiki_inline_code['emscripten_webgl_make_context_current'] = ':c:func:`emscripten_webgl_make_context_current`'
mapped_wiki_inline_code['emscripten_webgl_make_context_current()'] = ':c:func:`emscripten_webgl_make_context_current`'
mapped_wiki_inline_code['emscripten_wget'] = ':c:func:`emscripten_wget`'
mapped_wiki_inline_code['emscripten_wget()'] = ':c:func:`emscripten_wget`'
mapped_wiki_inline_code['emscripten_wget_data'] = ':c:func:`emscripten_wget_data`'
mapped_wiki_inline_code['emscripten_wget_data()'] = ':c:func:`emscripten_wget_data`'
mapped_wiki_inline_code['emscripten_worker_respond'] = ':c:func:`emscripten_worker_respond`'
mapped_wiki_inline_code['emscripten_worker_respond()'] = ':c:func:`emscripten_worker_respond`'
mapped_wiki_inline_code['emscripten_yield'] = ':c:func:`emscripten_yield`'
mapped_wiki_inline_code['emscripten_yield()'] = ':c:func:`emscripten_yield`'
mapped_wiki_inline_code['enum_'] = ':cpp:class:`enum_`'
mapped_wiki_inline_code['function'] = ':cpp:func:`function`'
mapped_wiki_inline_code['function()'] = ':cpp:func:`function`'
mapped_wiki_inline_code['getValue'] = ':js:func:`getValue`'
mapped_wiki_inline_code['getValue()'] = ':js:func:`getValue`'
mapped_wiki_inline_code['intArrayFromString'] = ':js:func:`intArrayFromString`'
mapped_wiki_inline_code['intArrayFromString()'] = ':js:func:`intArrayFromString`'
mapped_wiki_inline_code['intArrayToString'] = ':js:func:`intArrayToString`'
mapped_wiki_inline_code['intArrayToString()'] = ':js:func:`intArrayToString`'
mapped_wiki_inline_code['internal::CalculateLambdaSignature<LambdaType>::type'] = ':cpp:func:`internal::CalculateLambdaSignature<LambdaType>::type`'
mapped_wiki_inline_code['internal::CalculateLambdaSignature<LambdaType>::type()'] = ':cpp:func:`internal::CalculateLambdaSignature<LambdaType>::type`'
mapped_wiki_inline_code['internal::MemberFunctionType<ClassType,'] = ':cpp:func:`internal::MemberFunctionType<ClassType,`'
mapped_wiki_inline_code['internal::MemberFunctionType<ClassType,()'] = ':cpp:func:`internal::MemberFunctionType<ClassType,`'
mapped_wiki_inline_code['pure_virtual'] = ':cpp:type:`pure_virtual`'
mapped_wiki_inline_code['register_vector'] = ':cpp:func:`register_vector`'
mapped_wiki_inline_code['register_vector()'] = ':cpp:func:`register_vector`'
mapped_wiki_inline_code['removeRunDependency'] = ':js:func:`removeRunDependency`'
mapped_wiki_inline_code['removeRunDependency()'] = ':js:func:`removeRunDependency`'
mapped_wiki_inline_code['ret_val'] = ':cpp:type:`ret_val`'
mapped_wiki_inline_code['select_const'] = ':cpp:func:`select_const`'
mapped_wiki_inline_code['select_const()'] = ':cpp:func:`select_const`'
mapped_wiki_inline_code['setValue'] = ':js:func:`setValue`'
mapped_wiki_inline_code['setValue()'] = ':js:func:`setValue`'
mapped_wiki_inline_code['sharing_policy'] = ':cpp:type:`sharing_policy`'
mapped_wiki_inline_code['smart_ptr_trait'] = ':cpp:type:`smart_ptr_trait`'
mapped_wiki_inline_code['stackTrace'] = ':js:func:`stackTrace`'
mapped_wiki_inline_code['stackTrace()'] = ':js:func:`stackTrace`'
mapped_wiki_inline_code['std::add_pointer<Signature>::type'] = ':cpp:func:`std::add_pointer<Signature>::type`'
mapped_wiki_inline_code['std::add_pointer<Signature>::type()'] = ':cpp:func:`std::add_pointer<Signature>::type`'
mapped_wiki_inline_code['stringToUTF16'] = ':js:func:`stringToUTF16`'
mapped_wiki_inline_code['stringToUTF16()'] = ':js:func:`stringToUTF16`'
mapped_wiki_inline_code['stringToUTF32'] = ':js:func:`stringToUTF32`'
mapped_wiki_inline_code['stringToUTF32()'] = ':js:func:`stringToUTF32`'
mapped_wiki_inline_code['stringToUTF8'] = ':js:func:`stringToUTF8`'
mapped_wiki_inline_code['stringToUTF8()'] = ':js:func:`stringToUTF8`'
mapped_wiki_inline_code['worker_handle'] = ':c:var:`worker_handle`'
mapped_wiki_inline_code['writeArrayToMemory'] = ':js:func:`writeArrayToMemory`'
mapped_wiki_inline_code['writeArrayToMemory()'] = ':js:func:`writeArrayToMemory`'
mapped_wiki_inline_code['writeAsciiToMemory'] = ':js:func:`writeAsciiToMemory`'
mapped_wiki_inline_code['writeAsciiToMemory()'] = ':js:func:`writeAsciiToMemory`'
mapped_wiki_inline_code['writeStringToMemory'] = ':js:func:`writeStringToMemory`'
mapped_wiki_inline_code['writeStringToMemory()'] = ':js:func:`writeStringToMemory`'
return mapped_wiki_inline_code
|
UTF-8
|
Python
| false | false | 63,319 |
py
| 91 |
api_items.py
| 48 | 0.715709 | 0.714193 | 0 | 667 | 93.931034 | 154 |
erraa/kresets
| 2,731,599,236,036 |
14458bfab447687a76eb8e0ff52f4c126f91aa6c
|
975bd514c540b4c7376e0165af24a3cac657af8c
|
/app/views.py
|
663fb32cb98b26b0889aef147ebcd6a730919927
|
[] |
no_license
|
https://github.com/erraa/kresets
|
421c9f9f4fe77be6896d366c0a69837c86aa4af3
|
1f62cfecf00ed6d69a2f3a32993b25d69b0a6ef4
|
refs/heads/master
| 2020-04-06T06:57:20.069087 | 2016-09-11T08:14:07 | 2016-09-11T08:14:07 | 64,059,193 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
    posts = [{'body': 'ONYXIA'}, {'body': 'ZG'}]  # second element was a set literal; must be a dict for post.body
wberg = {'wberg': 'Wbergs Mamma'}
return render_template('index.html',
title='Home',
wberg=wberg,
posts=posts
)
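
# Usage sketch (an assumption: the surrounding `app` package provides a Flask
# app and a templates/index.html as in a standard Flask layout; this block
# only runs when the module is executed directly, never on import):
if __name__ == "__main__":
    with app.test_client() as client:
        response = client.get('/index')
        print(response.status_code)  # expected 200 once index.html renders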
|
UTF-8
|
Python
| false | false | 382 |
py
| 3 |
views.py
| 1 | 0.455497 | 0.455497 | 0 | 13 | 28.384615 | 48 |
jacealan/booktalk
| 5,514,738,010,046 |
63287efae402d896ae7aea2959dc85c7f7ac4377
|
5c874d83da5c246b7fdf8a1cd778bd9eba0f963d
|
/jacevenv/lib/python3.6/re.py
|
5eba8bbaea92008a5738bb3004cf80d138008a0d
|
[] |
no_license
|
https://github.com/jacealan/booktalk
|
29a5061436ec9ebf571b32b7e7831363a2a824ba
|
fb3f823966de8f280caead2b0519804d6ff45028
|
refs/heads/master
| 2021-08-28T07:11:00.763662 | 2017-12-11T14:35:30 | 2017-12-11T14:35:30 | 113,454,681 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/Users/jace/anaconda3/lib/python3.6/re.py
|
UTF-8
|
Python
| false | false | 41 |
py
| 54 |
re.py
| 48 | 0.804878 | 0.731707 | 0 | 1 | 41 | 41 |
ZipluxLHS/pyziplux
| 12,953,621,371,989 |
5dec25f49b107e7cf7df4344e4fde3c0bf4b1e94
|
d91f24ae8b47947af0241ad81dbbe9d20dcbe431
|
/microsoft/office.py
|
1057fdeaaf001f0512ab87cc7d227175db68ff90
|
[
"MIT"
] |
permissive
|
https://github.com/ZipluxLHS/pyziplux
|
e404cbb883d30a3007bb94476568c0c8393b52fc
|
1d8ada67e6237b30c35805b4b34c01f14865164f
|
refs/heads/main
| 2023-08-25T00:09:07.173748 | 2021-10-20T09:52:00 | 2021-10-20T09:52:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import openpyxl
from openpyxl import Workbook
from openpyxl.comments import Comment
import pandas as pd  # used by Xlsx.readWorksheet below
class Xlsx:
def __init__(self, filePath):
self.path = filePath
def writeWorkbook(self, source):
self.wb = Workbook()
ws = self.wb.active
for each in source:
ws.append(each)
self.wb.save(self.path)
    def writeComment(self, **comments):
        # keyword names are cell references, e.g. writeComment(A1="note")
        ws = self.wb.active
        for each in comments:
            ws["{}".format(each)].comment = Comment('{}'.format(comments[each]), 'Ziplux')
        self.wb.save(self.path)

    def readWorkbook(self):
        self.wb = openpyxl.load_workbook(self.path)
        return self.wb.sheetnames

    def readWorksheet(self, sheetname):
        ws = self.wb[sheetname]
        df = pd.DataFrame(ws.values)
        return df
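
# Usage sketch (hypothetical path "demo.xlsx"; demonstrates the write ->
# comment -> read round trip of the Xlsx wrapper above):
if __name__ == "__main__":
    xlsx = Xlsx("demo.xlsx")
    xlsx.writeWorkbook([["name", "qty"], ["widget", 3]])
    xlsx.writeComment(A1="header row")          # keyword = cell reference
    sheets = xlsx.readWorkbook()                # e.g. ['Sheet']
    print(xlsx.readWorksheet(sheets[0]))        # pandas DataFrame of cell values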
|
UTF-8
|
Python
| false | false | 988 |
py
| 3 |
office.py
| 3 | 0.580972 | 0.580972 | 0 | 31 | 29.935484 | 95 |
m-nez/mtg_deck_compiler
| 5,102,421,186,922 |
1984fd2d40874571b0b057a3ae8953015b0ee5b7
|
57592c985f0823264c7025f59151681ab4b0d192
|
/mtg_deck_compiler.py
|
06ffe21e161c931b6df7703ebc63b569100ba738
|
[] |
no_license
|
https://github.com/m-nez/mtg_deck_compiler
|
1a317601789536eebc6a7fcfd06daf03abf02af7
|
36b76e91ee26f733bed7729694de489c288499ee
|
refs/heads/master
| 2020-12-25T19:39:22.923284 | 2018-12-30T14:23:03 | 2018-12-30T14:23:03 | 64,498,380 | 2 | 1 | null | false | 2017-10-15T17:51:48 | 2016-07-29T17:25:46 | 2016-07-29T17:29:04 | 2017-10-15T17:51:47 | 4 | 0 | 1 | 0 |
Python
| null | null |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
MTG deck compiler
Copyright (C) 2016, 2017 Michał Nieznański
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import requests
import os.path
import urllib
import argparse
import uuid
import logging
from subprocess import call
from fpdf import FPDF
def exists_abort(*args):
for p in args:
if os.path.exists(p):
ans = input("".join(["Warning: ",
p, " already exists. Do you want to overwrite it? [y/n] "]))
if ans != "y":
print("Aborting")
exit(0)
class MagicCards:
_root = "http://magiccards.info"
def make_query(cardname):
cname = cardname.replace(" ", "+")
query = "http://magiccards.info/query?q=%s&v=card&s=cname" % cname
return query
def img_url(site, card):
m = re.search(r'img\s+src="([^"]+)"\s+alt="%s"' % card, site, re.M)
if m == None:
raise LookupError("Could not find image for: %s" % card)
return m.group(1)
    def change_lang(url, lang):
        spl = url.split("/")
        spl[2] = lang  # was `language`, an undefined name
        return "/".join(spl)
@classmethod
def get_img_url(cls, card):
q = MagicCards.make_query(card)
r = requests.get(q)
url = MagicCards.img_url(r.text, card)
if not url.startswith("http://"):
url = urllib.parse.urljoin(cls._root, url)
return url
class Gatherer:
def make_query(cardname):
query = "http://gatherer.wizards.com/Pages/Search/Default.aspx?name=+[%s]" % cardname
return query
def get_img_url(card):
q = Gatherer.make_query(card)
r = requests.get(q)
mvid = r.url[r.url.find("=") + 1:]
url = "".join(["http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=", mvid, "&type=card"])
return url
class Scryfall:
def save_img(cardname, filename):
response = requests.get(
"https://api.scryfall.com/cards/named",
params={"exact" : cardname, "format" : "image"}
)
with open(filename, "wb") as f:
f.write(response.content)
def save_img(url, filename):
r = requests.get(url)
if len(r.content) == 0:
print("Error: Incorrect url", url, "for", filename)
return
with open(filename, "wb") as f:
f.write(r.content)
class ImageMagic:
_resolution = (312, 445)
@classmethod
def resize(cls, img):
"""
Use ImageMagic to resize images to the common size
"""
new_size = '%dx%d!' % cls._resolution
call(['convert', img, '-resize', new_size, img])
@classmethod
def montage3x3(cls, images, output):
"""
Make an image with a 3x3 table from input images
"""
size = "%dx%d+8+8" % (cls._resolution)
call(["montage", "-tile", "3x3", "-geometry", size, "-depth", "8", *images, output])
@staticmethod
def convert(images, output):
call(["convert", *images, output])
class ImageTools:
_w = 210
_h = 297
@classmethod
def pdf_from_images(cls, images, output):
pdf = FPDF()
for image in images:
pdf.add_page()
pdf.image(image, x=0, y=0, w=cls._w, h=cls._h)
pdf.output(output, "F")
class Compiler:
def __init__(self, deck, directory="", prefix="page", img_format="png", overwrite=False):
self._directory = directory
self._deck = deck
self._dict = {}
self._prefix = prefix
self._suffix = "".join([".", img_format])
self.load_dec(deck)
self._images = []
self._overwrite = overwrite
def load_dec(self, filename):
f = open(filename, "r")
self._dict = {}
self._size = 0
for l in f:
if l[0] == "#" or l[0] == "\n":
continue
if l.startswith("SB:"):
count, name = l.split(maxsplit = 2)[1:]
else:
count, name = l.split(maxsplit=1)
count = int(count)
name = name.strip()
if name not in self._dict:
self._dict[name] = count
else:
self._dict[name] += count
self._size += count
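
    # Example of the deck format parsed above (hypothetical file; "#" lines
    # are comments and "SB:" marks sideboard entries):
    #
    #   # my deck
    #   4 Lightning Bolt
    #   2 Mountain
    #   SB: 1 Pyroblast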
def download_img(self):
"""
Download deck cards from magiccards or gatherer
"""
for card in self._dict:
if self.check_cache(card):
print("Found cached: %s" % card)
else:
print("Downloading: %s" % card)
path = os.path.join(self._directory, card)
exists_abort(path)
try:
Scryfall.save_img(card, path)
except Exception as e:
logging.info(e)
try:
url = MagicCards.get_img_url(card)
except LookupError as e:
logging.info(e)
url = Gatherer.get_img_url(card)
if not self._overwrite:
exists_abort(path)
save_img(url, path)
ImageMagic.resize(path)
def check_cache(self, img):
"""
Check if image is in the cache
"""
return os.path.isfile(os.path.join(self._directory, img))
def make_montage(self):
num_pages = (self._size - 1) // 9 + 1
images = [os.path.join(self._directory, im) for im in self._dict for i in range(self._dict[im])]
self._images = []
for i in range(num_pages):
output = "".join([self._prefix, str(i), self._suffix])
if not self._overwrite:
exists_abort(output)
ImageMagic.montage3x3(images[i * 9 : (i + 1) * 9], output)
self._images.append(output)
def merge_pdf(self, output):
if not output.lower().endswith(".pdf"):
output = "".join([output, ".pdf"])
if not self._overwrite:
exists_abort(output)
ImageTools.pdf_from_images(self._images, output)
def remove_images(self):
call(["rm", *self._images])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate pages containg up to 9 cards from a deck file.")
parser.add_argument("deck_file",
type=str, help="path to a deck file with each line being: quantity cardname")
parser.add_argument("-p", "--prefix", default=uuid.uuid1().hex,
type=str, help="prefix attached to each generated file")
parser.add_argument("-c", "--cache", default="/tmp/mtg_deck_compiler_cache",
type=str, help="directory with cached card images")
parser.add_argument("-f", "--format", default="png",
type=str, help="image format of the generated images")
parser.add_argument("-m", "--merge", default="",
help="path to merged pdf file generated from images")
parser.add_argument("-k", "--keep", action="store_true",
help="don't delete the images after generating the merged pdf")
parser.add_argument("-o", "--overwrite", action="store_true",
help="overwrite files without asking")
parser.add_argument("-l", "--log-level", default="INFO", choices=["CRITICAL", "INFO"],
help="set log level")
args = parser.parse_args()
logging.basicConfig(level=args.log_level)
if not os.path.exists(args.cache):
os.makedirs(args.cache)
p = Compiler(
args.deck_file,
directory=args.cache,
prefix=args.prefix,
img_format=args.format,
overwrite=args.overwrite)
p.download_img()
p.make_montage()
if args.merge:
p.merge_pdf(args.merge)
if not args.keep:
p.remove_images()
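
# Example invocation (a sketch with hypothetical paths):
#
#   ./mtg_deck_compiler.py my_deck.dec -c /tmp/mtg_cache -m proxies.pdf
#
# This downloads missing card scans into the cache, renders 3x3 montage pages
# and merges them into proxies.pdf, removing the intermediate page images.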
|
UTF-8
|
Python
| false | false | 8,517 |
py
| 2 |
mtg_deck_compiler.py
| 1 | 0.549031 | 0.542572 | 0 | 254 | 32.523622 | 108 |
kenpuca/db.science.uoit.ca
| 18,528,488,956,972 |
83d5c61d22d614005973eb782b4da2fc3e7371ac
|
93cafda4d25704cf00e11eb3457918c427a83cad
|
/courses/algorithms/mst/code/mst.py
|
629ea729d589110961bbdd530da9ae9c87ea8ecf
|
[] |
no_license
|
https://github.com/kenpuca/db.science.uoit.ca
|
92983a0188be294d4891a6fd8410823749a2ea76
|
970cbbeb691ef72bb6cec4def59c0640ccf5ee81
|
refs/heads/master
| 2020-05-21T22:49:41.674556 | 2017-10-25T02:17:01 | 2017-10-25T02:17:01 | 63,455,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def minimal_edge(H, V0):
"""pick an edge with the minimal weight
from V0 to the rest of the nodes"""
w_min = None
e_min = None
for e in H.E.keys():
if e[0] in V0 and e[1] not in V0:
w = H.E[e]["weight"]
if w_min == None or w < w_min:
e_min = e
w_min = w
return e_min
def prims(G, debug=None):
"returns a list of edges"
tree_nodes = set()
tree_edges = set()
# start with an arbitrarily picked vertex
N = len(G.V)
    v = next(iter(G.V))  # dict keys are not subscriptable on Python 3
tree_nodes.add(v)
for i in range(N-1):
if debug: debug(i, tree_nodes, tree_edges)
e = minimal_edge(G, tree_nodes)
if not e:
print "graph is not fully connected"
return tree_edges
tree_nodes.add(e[1])
tree_edges.add(e)
if debug: debug(i, tree_nodes, tree_edges)
return tree_edges
#================================
# Kruskal
#================================
def find_set(partition, v):
for i,p in enumerate(partition):
if v in p:
return i
return None
def join_sets(partition, i1, i2):
p1 = partition[i1]
p2 = partition[i2]
j1 = min(i1, i2)
j2 = max(i1, i2)
partition[j2:j2+1] = [] # remove the later one
partition[j1:j1+1] = [p1 | p2]
def kruskal(G, debug=None):
"returns a list of edges"
# get the edges and short by the weights
tree_edges = set()
edges = [e for e in G.E.keys() if e[0] < e[1]]
edges.sort(key=lambda e: G.E[e]["weight"])
partition = [set([v]) for v in G.V.keys()]
for i,e in enumerate(edges):
if len(partition) == 1:
break
i1 = find_set(partition, e[0])
i2 = find_set(partition, e[1])
if not i1 == i2:
join_sets(partition, i1, i2)
tree_edges.add(e)
if debug: debug(i, None, tree_edges)
return tree_edges
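
# Usage sketch (hypothetical Graph holder: the functions above only assume an
# object with a V dict of vertices and an E dict mapping (u, v) edge tuples to
# {"weight": w}, with both directions present so prims() can leave any vertex):
if __name__ == "__main__":
    class Graph(object):
        def __init__(self, V, E):
            self.V, self.E = V, E

    V = dict((v, None) for v in "abc")
    E = {}
    for u, v, w in [("a", "b", 1), ("b", "c", 2), ("a", "c", 3)]:
        E[(u, v)] = {"weight": w}
        E[(v, u)] = {"weight": w}
    G = Graph(V, E)
    print(sorted(prims(G)))    # minimum spanning tree: [('a', 'b'), ('b', 'c')]
    print(sorted(kruskal(G)))  # same tree found by Kruskal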
|
UTF-8
|
Python
| false | false | 1,916 |
py
| 72 |
mst.py
| 20 | 0.518789 | 0.497912 | 0 | 70 | 26.371429 | 50 |
rscgh/BA4445_Classifier
| 5,970,004,570,729 |
52a6bdebb325175d41b46ce3a79c298c630f73be
|
3579859cc04f12aa4554feb7f5f7c01bc9dab449
|
/hcp_tools.py
|
8b1a72440a9437ba844280e56032683ad40a1367
|
[] |
no_license
|
https://github.com/rscgh/BA4445_Classifier
|
bcbebd852555874b251d1a8aedb66298ccf8205e
|
e92093f2a2e4a74584575d1485fca383b9a4ffc7
|
refs/heads/main
| 2023-07-29T18:23:22.624979 | 2021-09-14T11:22:54 | 2021-09-14T11:22:54 | 302,207,034 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''
preprocess_and_load_tseries is based on the original script by Şeyma Bayrak, https://github.com/sheyma
repository located at: https://github.com/NeuroanatomyAndConnectivity/hcp_corr
The code was streamlined and brought up to date.
Furthermore, an option for smoothing was added; for that, wb_command needs to be available from the system's terminal.
'''
## called from ICA_HCP like:
## t_series(subject = "/scr/murg2/HCP_Q3_glyphsets_left-only/100307", hemisphere='LH', N_first=0, N_cnt=32492)
## K = t_series(subject = "/data/t_hcp/S500_2014-06-25/_all/100307", hemisphere='LH', N_first=0, N_cnt=32492)
# from load_hcp_annotated_rsc import t_series
from glob import glob
import os
import nibabel as nib
import numpy as np
import subprocess
from matplotlib import pyplot as plt
############################
# Global import data
cort = np.loadtxt('res/indices_LH_29696_of_32492.txt').astype(int).tolist()  # np.int is removed in modern NumPy
mask = np.zeros((32492)); np.put(mask, cort, 1)
bm_leftown = nib.cifti2.BrainModelAxis.from_mask(mask, "LEFT_CORTEX")
############################
# Saving files
def save_LH_29k_dtseries():
    pass  # placeholder -- not functional yet
def get_LH_29k_brainmodel(mask_indices = None, extract_brain_mode_from_file = None, area_list = ["LH"] ):
if mask_indices is None:
if extract_brain_mode_from_file is None:
extract_brain_mode_from_file = '/data/t_hcp/S500_2014-06-25/_all/100307/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST1_LR_Atlas_hp2000_clean.dtseries.nii';
img = nib.load(extract_brain_mode_from_file)
cort = list(img.header.matrix._mims[1].brain_models)[0].vertex_indices._indices
mask_indices = cort
mask = np.zeros((32492)); np.put(mask, mask_indices, 1)
brainmodel = nib.cifti2.BrainModelAxis.from_mask(mask, "LEFT_CORTEX")
return brainmodel
# data should be numpy array of (n_scalars, n_vertices) i.e. 10, 29696
def save_dscalar(filename, data, brainmodel, scalar_names = None, subset = None):
n_scalars = data.shape[1];
n_vertices = brainmodel.size; # i.e. 29696 for only left hemisphere in 32k_FS_LR
if scalar_names is None:
scalar_names = [str(x) for x in range(data.shape[1])]
new_scalar_axis = nib.cifti2.ScalarAxis(scalar_names);
ni_header = nib.cifti2.Cifti2Header.from_axes((new_scalar_axis, brainmodel))
if not(subset is None):
newdata = np.zeros((n_scalars, n_vertices))
newdata[:,subset] = data;
data = newdata;
nib.Cifti2Image( data, ni_header).to_filename(filename);
return;
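
# Usage sketch (hypothetical output path; assumes this module was imported
# from the repo root so that bm_leftown above could be initialised):
#
#   demo = np.random.rand(1, bm_leftown.size)      # one scalar map, 29696 verts
#   save_dscalar("demo.dscalar.nii", demo, bm_leftown, scalar_names=["demo"])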
# Visualization and saving helper functions:
def quick_cifti_ds(data, dsnames = None, fn = None, return_img=False):
global bm_leftown;
if dsnames is None: dsnames = ['img%i' % (x) for x in range(data.shape[0])]
cimgvis4 = nib.Cifti2Image(data, nib.cifti2.Cifti2Header.from_axes((nib.cifti2.ScalarAxis(dsnames), bm_leftown)))
if not (fn is None): cimgvis4.to_filename(fn);
if return_img: return cimgvis4;
return;
def quick_show_FS32k(mesh="flat", ref="S1200"):
#global ...
pass
from matplotlib.ticker import MaxNLocator
def imtlshow(img, plot = plt, ax = plt.gca(), show = True):
plt.imshow(img.T)
tlshow(show=show)
def tlshow(plot = plt, ax = plt.gca(), show=True):
#ax.set_ylim(ax.get_ylim()[::-1]) # invert the axis
#ax.yaxis._update_ticks()
#ax.yaxis.set_ticks(ax.yaxis.get_major_ticks()[::-1]) # set y-ticks
#ax.yaxis.tick_left() # remove right y-Ticks
ax.invert_yaxis()
ax.set_ylim(ax.get_ylim()[::-1])
ax.xaxis.tick_top() # and move the X-Axis
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
if show: plt.show()
def get_individual_IFG(sub, hcp_all_path = '/data/t_hcp/S500_2014-06-25/_all'):
anatlabp = os.path.join(hcp_all_path, sub, 'MNINonLinear/fsaverage_LR32k/%s.L.aparc.32k_fs_LR.label.gii' % (sub))
AnatLabels = nib.load(anatlabp) #AnatLabels2 = nib.gifti.giftiio.read(anatlabp)
#AnatLabels.print_summary()
#AnatLabels.get_labeltable().labels[20].label #-> u'L_parstriangularis'
#AnatLabels.darrays[0].data.shape #-> (32492,); elements: array([10, 29, 24, ..., 15, 15, 15], dtype=int32)
AnatLabelsData= AnatLabels.darrays[0].data
op = AnatLabelsData == 18; # shape: (32492,)
tri = AnatLabelsData == 20;
#np.count_nonzero((op+tri)) # -> 989 voxels in the combined region
return [op, tri];
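
# Usage sketch (subject id as in the examples above; returns two boolean
# masks over the 32492 left-hemisphere vertices):
#
#   op, tri = get_individual_IFG("100307")
#   ifg = op + tri    # combined pars opercularis + pars triangularis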
#previously called: t_series
def preprocess_and_load_tseries(subject_dir, # i.e. "/scr/murg2/HCP_Q3_glyphsets_left-only/100307"
subject_id, # i.e. 100307
template = None,
cnt_files=4,
hemisphere='LH',
N_first=None, # i.e. 0
N_cnt=None, # i.e. 32492 svoxels of one hemisphere
dtype=None,
smoothing=False, # recently added, done before normalization and concatenation;
temp_dir ="", # used for storing the intermediate smoothing files, if none is given, the script execution dir is used
keep_tmp_files = False,
normalize=True):
"""Load/write Human Connectome Project (HCP) neuroimaging files via NiBabel
module. The HCP data is released in GIFTI format (*nii extention) for
almost 500 subjects. This script aims to get concetanation of all
time-series for each subject.
subject : string
subject = data_path + subject_id.
e.g. subject = '/a/documents/connectome/_all/100307'
template : string
Template is the sketch-name of *.nii files (GIFTI format), it is hard-
coded as template_flag and template_orig...
cnt_files : int
Number of *.nii files of interest for a subject. The template above
has 4 forms in total, therefore cnt_files = 4
hemisphere : string
# LH: CORTEX_LEFT >> N_first = 0, N_cnt = 29696
# RH: CORTEX_RIGHT >> N_first = 29696, N_cnt = 29716
# UL: ACCUMBENS_LEFT >> N_first = 59412, N_cnt = 135
# UR: ACCUMBENS_RIGHT >> N_first = 59547, N_cnt = 140
# ML: AMYGDALA_LEFT >> N_first = 59687, N_cnt = 315
# MR: AMYGDALA_RIGHT >> N_first = 60002, N_cnt = 332
# BS: BRAIN_STEM >> N_first = 60334, N_cnt = 3472
# CL: CAUDATE_LEFT >> N_first = 63806, N_cnt = 728
# CR: CAUDATE_RIGHT >> N_first = 64534, N_cnt = 755
# EL: CEREBELLUM_LEFT >> N_first = 65289, N_cnt = 8709
# ER: CEREBELLUM_RIGHT >> N_first = 73998, N_cnt = 9144
# DL: DIENCEPHALON_VENTRAL_LEFT >> N_first = 83142, N_cnt = 706
# DR: DIENCEPHALON_VENTRAL_RIGHT >> N_first = 83848, N_cnt = 712
# HL: HIPPOCAMPUS_LEFT >> N_first = 84560, N_cnt = 764
# HR: HIPPOCAMPUS_RIGHT >> N_first = 85324, N_cnt = 795
# PL: PALLIDUM_LEFT >> N_first = 86119, N_cnt = 297
# PR: PALLIDUM_RIGHT >> N_first = 86416, N_cnt = 260
# AL: PUTAMEN_LEFT >> N_first = 86676, N_cnt = 1060
# AR: PUTAMEN_RIGHT >> N_first = 87736, N_cnt = 1010
# TL: THALAMUS_LEFT >> N_first = 88746, N_cnt = 1288
# TR: THALAMUS_RIGHT >> N_first = 90034, N_cnt = 1248
# full : all of them >> N_first = 0, N_cnt = 91282
K : output, numpy.ndarray
Concetanation of time-series matrices obtained from each *.nii file.
References :
http://www.humanconnectome.org/
https://github.com/satra/nibabel/tree/enh/cifti2
right nibabel version to download:
$ git clone --branch enh/cifti2 https://github.com/satra/nibabel.git
"""
'''
subject='/data/t_hcp/S500_2014-06-25/_all/100307'
subject='/data/t_hcp/S500_2014-06-25/_all/103414'
'''
template_flat = 'rfMRI_REST?_??_Atlas_hp2000_clean.dtseries.nii'
template_orig = 'MNINonLinear/Results/rfMRI_REST?_??/rfMRI_REST?_??_Atlas_hp2000_clean.dtseries.nii'
'''
from glob import glob
glob(os.path.join(subject, template_flat))
files = glob(os.path.join(subject, template_orig)) -> yields 4 files:
/data/t_hcp/S500_2014-06-25/_all/100307/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST1_LR_Atlas_hp2000_clean.dtseries.nii
/data/t_hcp/S500_2014-06-25/_all/100307/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST[1|2]_[LR|RL]_Atlas_hp2000_clean.dtseries.nii
'''
# search files in given and default templates
files = []
if template != None:
files = [val for val in sorted(glob(os.path.join(subject_dir, template)))]
if len(files) == 0:
files = [val for val in sorted(glob(os.path.join(subject_dir, template_flat)))]
if len(files) == 0:
files = [val for val in sorted(glob(os.path.join(subject_dir, template_orig)))]
if len(files) < cnt_files:
return []
#raise Exception('Not enough files found!')
files = files[:cnt_files]
print(files)
smooth_files = []
### Smoothing
if smoothing:
for filen in files:
local_tar_filename = os.path.join(temp_dir,"smooth_tmp_%s_" %(subject_id) + os.path.split(filen)[-1])
left_surface_file = os.path.join(subject_dir, "MNINonLinear/fsaverage_LR32k/%s.L.midthickness.32k_fs_LR.surf.gii" % (subject_id))
right_surface_file = os.path.join(subject_dir, "MNINonLinear/fsaverage_LR32k/%s.R.midthickness.32k_fs_LR.surf.gii" % (subject_id))
print("left_surface_file: ", left_surface_file)
if os.path.exists(local_tar_filename):
smooth_files.append(local_tar_filename)
continue;
command = "wb_command -cifti-smoothing %s 2 2 COLUMN %s -left-surface %s -right-surface %s" % (filen, local_tar_filename, left_surface_file, right_surface_file)
print("Smoothing now: ", filen, " using the following command:\n", command)
subprocess.call(command.split())
print("Done.")
smooth_files.append(local_tar_filename)
files = smooth_files;
print("Final smooth files: ", smooth_files)
# dictionary for brain structures
label_index = { 'LH':0, 'RH':1, 'UL':2, 'UR':3, 'ML':4, 'MR':5, 'BS':6,
'CL':7, 'CR':8, 'EL':9, 'ER':10, 'DL':11, 'DR':12, 'HL':13,
'HR': 14, 'PL':15, 'PR':16, 'AL': 17, 'AR':18, 'TL':19,
'TR':20 }
for x in range(0, cnt_files):
# x = 1; x=2...
# import nibabel as nb
print("load file: ", files[x])
img = nib.load(files[x])
# if beginning and end indices given manually
        if (N_first is not None and N_cnt is not None):
# img.data is decrepeted; now usw img.get_fdata() (should be the same as img.get_data())
# img.get_fdata().shape ~ (1200, 91282), 1200 timepoints x 91282 svoxels; just select the left hemisphere voxels
#single_t_series = img.data[:, N_first:N_first+N_cnt].T
single_t_series = img.get_fdata()[:, N_first:N_first+N_cnt].T
# yields a numpy.ndarray of shape (32492, 1200) with floating point values float64 ...
# if a particular brain structure wanted
# seems like hemisphere is ignored if N_first and N_cnt are given
elif hemisphere != 'full':
# find out the indices of brain structure of interest
hem = label_index[hemisphere] # yields 0 for 'LH'
print("BRAIN STRUCTURE: ")
# print img.header.matrix.mims[1].brainModels[hem].brainStructure
print(list(img.header.matrix._mims[1].brain_models)[hem].brain_structure) # -> CIFTI_STRUCTURE_CORTEX_LEFT
N_first = list(img.header.matrix._mims[1].brain_models)[hem].index_offset # -> 0
N_cnt = list(img.header.matrix._mims[1].brain_models)[hem].index_count # -> 29696
#single_t_series = img.data[:, N_first:N_first+N_cnt].T
single_t_series = img.get_fdata()[:, N_first:N_first+N_cnt].T
# if all brain nodes wanted
elif hemisphere == 'full':
N_first = 0
hem = 1
N_tmp = list(img.header.matrix._mims[1].brain_models)[hem].index_offset
N_cnt = list(img.header.matrix._mims[1].brain_models)[hem].index_count
N_cnt += N_tmp
#single_t_series = img.data[:, N_first:N_first+N_cnt].T
single_t_series = img.get_fdata()[:, N_first:N_first+N_cnt].T
        # length of time series
        m = single_t_series.shape[1]
        n = single_t_series.shape[0]

        if x == 0:
            # In the first loop we initialize matrix K to be filled up and returned.
            # By default we use the same dtype as the input file (float32).
            init_dtype = single_t_series.dtype if dtype is None else dtype
            K = np.ndarray(shape=[n, m], dtype=init_dtype, order='F')
        else:
            # compare against the previous run *before* updating m_last/n_last
            # (the original updated them first, so the warnings never fired)
            if m_last != m:
                print("Warning, %s contains time series of different length" % (subject_dir))
            if n_last != n:
                print("Warning, %s contains different count of brain nodes" % (subject_dir))
            K.resize([n, K.shape[1]+m])
        m_last = m
        n_last = n
# concatenation of (normalized) time-series, column-wise
if normalize:
mean_series = single_t_series.mean(axis=1)
std_series = single_t_series.std(axis=1)
K[:, -m:] = ((single_t_series.T - mean_series) / std_series).T
else:
K[:, -m:] = single_t_series
del img
del single_t_series
# remove the tempoary smooth files
if not keep_tmp_files:
print("Remove files: ", smooth_files)
for file in smooth_files: os.remove(file)
return K
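
# Usage sketch (hypothetical HCP paths, mirroring the examples quoted in the
# docstring; loads four normalized resting-state runs for the left cortex and
# concatenates them along time):
if __name__ == "__main__":
    K = preprocess_and_load_tseries(
        subject_dir="/data/t_hcp/S500_2014-06-25/_all/100307",
        subject_id="100307",
        hemisphere="LH",
        N_first=0,
        N_cnt=29696,
        smoothing=False)
    print(K.shape)  # expected: (29696, 4 * 1200) for standard HCP runs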
|
UTF-8
|
Python
| false | false | 13,989 |
py
| 11 |
hcp_tools.py
| 5 | 0.603732 | 0.561982 | 0 | 366 | 37.213115 | 168 |
art32fil/slam_3d_tbm
| 12,549,894,461,341 |
49a36439831b200261eab2f78a411461e269cbab
|
e242e8ecabe19af42e3962fce75c696e4ecbb132
|
/python_ros_packages/bag_reader/src/bag_to_3D_points_with_poses.py
|
97ff7c8a0e5bfc3931fa46cef7d6d9ba70004178
|
[] |
no_license
|
https://github.com/art32fil/slam_3d_tbm
|
956931e72273134922618608a884850b8d0bfe1b
|
3ab07daeb743807ec211b0776621454a1294e9a2
|
refs/heads/master
| 2020-09-10T04:00:53.116074 | 2020-04-13T09:19:31 | 2020-04-13T09:19:31 | 221,643,189 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import rospy
import rosbag
import sensor_msgs
from sensor_msgs.msg import PointCloud2 as pcl2
from sensor_msgs import point_cloud2 as pcl2_handler
import tf2_ros
import tf2_py as tf2
from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Quaternion
import tf.transformations as tr
import numpy as np
from ros_numpy import point_cloud2
import math
from sensor_msgs.msg import ChannelFloat32
from visualization_msgs.msg import Marker
from map import Map
from map import cell_to_prob
from map import merge_cells
from bresenham import bresenhamline
from bresenham import _bresenhamlines
from cloud_to_marker import cloud_to_marker
class Cloud:
def __init__(self, name="", values=np.array([[]])):
self.name = name
self.values = values
def position_to_cell(pose, scale):
i = int(math.floor(pose[0]/scale))
j = int(math.floor(pose[1]/scale))
k = int(math.floor(pose[2]/scale))
return np.array([[i,j,k]])
def positions_to_cell(array, scale):
return np.floor(array/scale).astype(int)
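
# Example: with scale = 0.1 m per cell, the point (0.25, -0.05, 0.31) maps to
# cell indices (2, -1, 3):
#
#   position_to_cell(np.array([0.25, -0.05, 0.31]), 0.1)   # -> [[ 2 -1  3]]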
def get_pose_angle(transform):
if not isinstance(transform, TransformStamped):
rospy.logwarn("try to convert tansform of incorrect type.\nThe type is %s\nThe possible type is %s",
type(transform).__name__, "TransformStamped")
pose = transform.transform.translation
orientation = transform.transform.rotation
p = np.array([pose.x, pose.y, pose.z])
a = np.array(tr.euler_from_quaternion([orientation.x, orientation.y, orientation.z, orientation.w]))
return p,a
def getSE3_matrix(pose, angles):
    # pose = np.array([x, y, z])
    # angles = np.array([ax, ay, az])
    g = tr.euler_matrix(angles[0], angles[1], angles[2])
g[0:3, -1] = pose
return g
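
# Example (hypothetical pose): a pure 1 m translation along x with zero
# rotation gives the identity rotation block plus a translation column:
#
#   getSE3_matrix(np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]))
#   # -> [[1. 0. 0. 1.]
#   #     [0. 1. 0. 0.]
#   #     [0. 0. 1. 0.]
#   #     [0. 0. 0. 1.]]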
def transformStamped_to_SE3(transform):
if not isinstance(transform, TransformStamped):
rospy.logwarn("try to convert tansform of incorrect type.\nThe type is %s\nThe possible type is %s",
type(transform).__name__, "TransformStamped")
p,a = get_pose_angle(transform)
g = getSE3_matrix(p,a)
return g
def transform_cloud(SE3, new_frame, cloud):
cloud_out = Cloud()
cloud_out.name = new_frame
cloud_out.values = np.matmul(SE3, cloud.values)
return cloud_out
def cost_of_scan(grid_map, scan, robot_pose):
occupied_points = scan + np.array([grid_map.Zx, grid_map.Zy, grid_map.Zz])
occ_cells = grid_map.map[occupied_points[:,0],occupied_points[:,1],occupied_points[:,2]]
value = np.sum(cell_to_prob(merge_cells(occ_cells, np.array([0.9,0.5,0.5]))))
    return value  # accumulator
def find_best_pose(grid_map, scan_meters, robot_pose_meters, robot_angles, scale, world_frame):
final_pose = robot_pose_meters
final_angles = robot_angles
SE3 = getSE3_matrix(final_pose, final_angles)
cloud_out = transform_cloud(SE3, world_frame, scan_meters)
cloud_cells = positions_to_cell(np.transpose(cloud_out.values[0:-1]), scale)
robot_cells = position_to_cell(robot_pose_meters, scale)
max_cost = cost_of_scan(grid_map, cloud_cells, robot_cells)
final_robot_cells = robot_cells
final_cloud_cells = cloud_cells
final_pose_delta = np.array([0.0, 0.0, 0.0])
final_angles_delta = np.array([0.0, 0.0, 0.0])
for i in range(1000):
delta_pose = 0.5*scale*np.random.randn(3)
delta_angles = 0.01*np.random.randn(3)
        if not scan_matcher_is_used:
delta_pose = np.array([0,0,0])
delta_angles = np.array([0,0,0])
new_pose = final_pose + delta_pose
new_angles = final_angles + delta_angles
new_SE3 = getSE3_matrix(new_pose, new_angles)
#print(SE3 - new_SE3)
#print("before transform_cloud")
#t = rospy.Time.now()
cloud_out = transform_cloud(new_SE3, world_frame, scan_meters)
#print("transform_cloud executes: ", rospy.Time.now() - t)
#print("after transform_cloud")
cloud_cells = positions_to_cell(np.transpose(cloud_out.values[0:-1]), scale)
robot_cells = position_to_cell(new_pose, scale)
#print("before cost_of_scan")
#t = rospy.Time.now()
cost = cost_of_scan(grid_map, cloud_cells, robot_cells)
#print("min_cost: ", min_cost, ", cost: ", cost)
#print("cost_of_scan executes: ", rospy.Time.now() - t)
#print("after cost_of_scan")
if cost > max_cost*1.05:
max_cost = cost
final_pose = new_pose
final_angles = new_angles
final_robot_cells = robot_cells
final_cloud_cells = cloud_cells
final_pose_delta = delta_pose
final_angles_delta = delta_angles
return final_pose, final_angles, final_robot_cells, final_cloud_cells, final_pose_delta, final_angles_delta
def get_tf_transform(in_which_frame, what_frame, tf_buffer):
try:
trans = tf_buffer.lookup_transform(in_which_frame, what_frame,
rospy.Time(0),
rospy.Duration(4))
except tf2.LookupException as ex:
rospy.logwarn(ex)
return None
except tf2.ExtrapolationException as ex:
rospy.logwarn(ex)
return None
return trans
def time_to_s_ms_ns(t):
t = t.to_nsec()
secs = int(t/1000000000)
msecs = int(t/1000000) - 1000*secs
mksecs = int(t/1000) - 1000*msecs - 1000000*secs
nsecs = t - 1000*mksecs - 1000000*msecs - 1000000000*secs
return "%ss %sms %smks %sns"%(secs,msecs,mksecs,nsecs)
if __name__ == '__main__':
rospy.init_node("name_by_default")
np.random.seed(int(rospy.get_param('~seed')))
rospy.loginfo("begin")
pose_out_file = open(str(rospy.get_param('~out/file_poses')), 'w')
bag_name = str(rospy.get_param('~bag/file_path'))
bag = rosbag.Bag(bag_name, 'r')
topic_pcl2_name = str(rospy.get_param('~bag/topic_pcl'))
world_frame = str(rospy.get_param('~tf/world_frame'))
odom_frame = str(rospy.get_param('~tf/odom_frame'))
camera_frame = str(rospy.get_param('~tf/pcl_frame'))
scan_matcher_is_used = bool(rospy.get_param('~sm/allow_scan_matcher'))
rospy.loginfo("before_for")
tf_buffer = tf2_ros.Buffer()
tf_bc = tf2_ros.TransformBroadcaster()
tf_listener = tf2_ros.TransformListener(tf_buffer)
pub1 = rospy.Publisher("~/transormed_pc", Marker, queue_size=10)
pub2 = rospy.Publisher("~/original_pc", Marker, queue_size=10)
scale = float(rospy.get_param("~map/meters_per_cell"))
r = rospy.Rate(1)
m = Map(scale)
t3 = rospy.Time.now()
dp = np.array([0., 0., 0.])
dp_abs = np.array([0., 0., 0.])
da = np.array([0., 0., 0.])
pose = np.array([0., 0., 0.])
angle = np.array([0., 0., 0.])
for topic, msg, time in bag.read_messages(topics = [topic_pcl2_name, "/tf", "/tf_static"]):
if rospy.is_shutdown():
break
if topic == "/tf" or topic == "/tf_static":
            transforms = msg
            for msg_tf in transforms.transforms:
tf_bc.sendTransform(msg_tf)
msg_tf = TransformStamped()
msg_tf.header.stamp = time
msg_tf.header.frame_id = world_frame
msg_tf.child_frame_id = odom_frame
msg_tf.transform.translation = Vector3(dp[0],dp[1],dp[2])
q = tr.quaternion_from_euler(da[0],da[1],da[2])
msg_tf.transform.rotation = Quaternion(q[0],q[1],q[2],q[3])
tf_bc.sendTransform(msg_tf)
elif topic == topic_pcl2_name:
cloud = Cloud(msg.name, np.reshape(np.array(msg.values, dtype=np.float32),(4,-1),'F'))
#pts = pcl2_handler.read_points(msg, skip_nans=True, field_names=("x", "y", "z"))
pts = cloud.values
#print(pts.shape)
trans = get_tf_transform(in_which_frame = world_frame, what_frame = camera_frame,
tf_buffer = tf_buffer)
            if trans is None:
                continue
p,a = get_pose_angle(trans)
print("p: ", p, " a: ", a)
pose = p
angle = a
#SE3 = getSE3_matrix(p,a)
camera_pose_cell = position_to_cell(p, scale)
#print(camera_pose_cell)
#cloud_out = transform_cloud(SE3, trans.header.frame_id, cloud)
#pts = cloud_out.values
t1 = rospy.Time.now()
print("pub markers: ", time_to_s_ms_ns(t1 - t3))
#print(cloud_out.values.shape)
#occupied_cells = np.transpose(positions_to_cell(pts[0:-1,:],scale))
(best_camera_pose_meters,
best_camera_angles,
best_camera_cells,
best_cloud_cells,
delta_pose,
delta_angle) = find_best_pose(m, cloud, pose, angle, scale, world_frame)
pose_quaternion = tr.quaternion_from_euler(best_camera_angles[0], best_camera_angles[1], best_camera_angles[2])
pose_out_file.write(str(time.to_sec())+" "+
str(best_camera_pose_meters[0])+" "+ str(best_camera_pose_meters[1]) + " " + str(best_camera_pose_meters[2])+ " "+
str(pose_quaternion[0])+" "+str(pose_quaternion[1])+" "+str(pose_quaternion[2])+" "+str(pose_quaternion[3])+"\n")
cloud_out = transform_cloud(getSE3_matrix(best_camera_pose_meters, best_camera_angles),
world_frame, cloud)
print("dp: ", positions_to_cell(delta_pose,scale), " da: ", delta_angle)
            trans = get_tf_transform(in_which_frame = world_frame, what_frame = odom_frame,
                                     tf_buffer = tf_buffer)
            if trans is None:
                continue
            p,a = get_pose_angle(trans)
dp = p + delta_pose
da = a + delta_angle
print("dp_: ", positions_to_cell(dp,scale), " da_ ", da)
lines = _bresenhamlines(best_cloud_cells, best_camera_cells,-1)
t2 = rospy.Time.now()
print("scan matcher: ", time_to_s_ms_ns(t2 - t1))
#print(type(line[0][0]).__name__)
#m.update_cells(line,np.array([0.05, 0.9, 0.05]))
#m.update_cells(best_cloud_cells,np.array([0.9, 0.05, 0.05]))
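            # Ray update: per-cell probabilities are [occupied, free, unknown];
            # occupancy evidence is ~0.95 at the scan hit (line start) and decays
            # exponentially toward "free" along the ray back to the camera.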
for line in lines:
l = -0.5
exp = 0.95*np.exp(l*np.arange(0,len(line)))
cell_values = np.transpose(np.array([exp, 0.95 - exp, 0.05*np.ones(len(line))]))
m.update_cells(line,cell_values)
t3 = rospy.Time.now()
print("update cells: ", time_to_s_ms_ns(t3 - t2))
#pub1.publish(cloud_to_marker(cloud_out,1,0,0))
#pub2.publish(m.to_marker())
#pub2.publish(cloud_to_marker(cloud,0,1,0))
#r.sleep()
#input("Press Enter to continue...")
pose_out_file.close()
rospy.loginfo("after_for")
|
UTF-8
|
Python
| false | false | 9,837 |
py
| 14 |
bag_to_3D_points_with_poses.py
| 11 | 0.663312 | 0.637694 | 0 | 271 | 35.239852 | 136 |
ellenmconsidine/Python-Slang-Dictionary
| 4,088,808,893,962 |
ce4a0ae2beb1e86304a41d43b2a5222efe572075
|
6095fbed62280243314037da2f9ab3bc7570e041
|
/Assignment10_Considine.py
|
d38fc21515d1a427771de540ac7059d4087bfaac
|
[] |
no_license
|
https://github.com/ellenmconsidine/Python-Slang-Dictionary
|
dfb84442c1f4a60a00c47bf6b2f51083e498e0c1
|
329870cba94390ea463fceb68c747cfb3febbc95
|
refs/heads/master
| 2021-01-11T20:03:09.338352 | 2017-01-19T13:51:39 | 2017-01-19T13:51:39 | 79,457,978 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Ellen Considine, TA: Ranga, Assignment 10
def CreateDictionary(filename):
my_dict = {}
lines = []
f = open(filename, 'r') #opening input file
x = f.readline()
while x != "":
lines.append(x)
x = f.readline()
f.close() #closing input file
for i in range(len(lines)):
key = ""
value = ""
for j in range(lines[i].find(',')):
key = key + lines[i][j]
for k in range(len(key)+1,len(lines[i])):
value = value + lines[i][k]
my_dict[key] = value #making dictionary entries
return my_dict
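# Input file format (an assumption inferred from the parsing above): one
# comma-separated pair per line, e.g. "lol,laughing out loud" maps the key
# "lol" to the value "laughing out loud".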
def Deslang(str, dict):
words = str.split(' ')
English = ""
for word in words: #building the phrase in English
if word in dict: #checking if each key is in the dictionary
English = English + dict[word].strip("\n") + " "
else:
English = English + word + " "
return English
def main():
#1
slang_dict = CreateDictionary("textTOEnglish.txt")
#2
text = raw_input("Enter a text abbreviation: ")
if text in slang_dict:
print Deslang(text, slang_dict)
else:
print "Not found"
text = raw_input("Enter a text abbreviation, or 'quit': ")
while text != "quit":
if text in slang_dict:
print Deslang(text, slang_dict)
else:
print "Not found"
text = raw_input("Enter a text abbreviation, or 'quit': ")
#3
text = raw_input("Enter some text abbreviations, separated by a space: ")
texts = text.split()
    valid = all(t in slang_dict for t in texts)  # every token must be known
if valid:
print Deslang(text, slang_dict)
else:
print "Not found"
text = raw_input("Enter some text abbreviations, separated by a space, or 'quit': ")
while text != "quit":
texts = text.split()
        valid = all(t in slang_dict for t in texts)  # every token must be known
if valid:
print Deslang(text, slang_dict)
else:
print "Not found"
text = raw_input("Enter some text abbreviations, separated by a space, or 'quit': ")
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,299 |
py
| 1 |
Assignment10_Considine.py
| 1 | 0.5398 | 0.53719 | 0 | 77 | 28.857143 | 92 |
daniel-reich/ubiquitous-fiesta
| 2,937,757,676,275 |
0316b972c911e49a3da6fb7e890538ee696e3b62
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/quMt6typruySiNSAJ_12.py
|
c1a325dca146548eb733dfed62d4554554f6df03
|
[] |
no_license
|
https://github.com/daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from math import ceil
def shuffle_count(num):
    """Return how many perfect out-shuffles it takes to bring a deck of
    `num` cards (assumed even) back to sorted order."""
    d = [i for i in range(num)]
    ds = [0] * num
    k = 0
    while True:
        d1 = d[:num // 2]
        d2 = d[num // 2:]
        for i in range(0, num - 1, 2):
            ds[i] = d1[ceil(i / 2)]
            ds[i + 1] = d2[ceil(i / 2)]
        k += 1
        d = ds  # the slices above copy, so reusing ds next pass is safe
        if d == sorted(d):
            return k
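# Quick check (hedged example: an 8-card deck takes 3 perfect out-shuffles to
# return to sorted order).
if __name__ == "__main__":
    print(shuffle_count(8))  # -> 3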
|
UTF-8
|
Python
| false | false | 334 |
py
| 38,088 |
quMt6typruySiNSAJ_12.py
| 38,088 | 0.434132 | 0.389222 | 0 | 15 | 21.133333 | 34 |
briancabbott/GitHub-Repository-Downloader
| 1,872,605,750,131 |
2f5483b73e6c932ebf877dd6f2c39f8b97630100
|
5c157c4e76ca54d30f543e0393eae29d49d90962
|
/TypeScript/resources/code/ts_lib/Apollo-Latest/apollographql/tap-orbit/tap_orbit/streams/members.py
|
4f6c836273471a2692c63c145013bd21395610aa
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/briancabbott/GitHub-Repository-Downloader
|
a69ccc97100947525fd77e822544b84b82c5636a
|
b2ea9502f68e64ff4c8e02ff6113f4f4dc927f60
|
refs/heads/master
| 2023-05-25T07:21:43.908126 | 2023-05-21T08:05:45 | 2023-05-21T08:05:45 | 148,926,714 | 1 | 0 | null | false | 2022-12-08T23:32:10 | 2018-09-15T17:59:55 | 2022-07-10T13:31:30 | 2022-12-08T23:32:10 | 126,905 | 0 | 0 | 3 |
TypeScript
| false | false |
from tap_orbit.streams.base import BaseStream
import singer
LOGGER = singer.get_logger()
class MembersStream(BaseStream):
API_METHOD = "GET"
TABLE = "members"
KEY_PROPERTIES = ["id"]
def response_key(self):
return "data"
@property
def path(self):
return "members"
@property
def api_method(self):
return "GET"
|
UTF-8
|
Python
| false | false | 371 |
py
| 3,942 |
members.py
| 1,770 | 0.625337 | 0.625337 | 0 | 21 | 16.666667 | 45 |
legendAhsan/commentAnalyzer
| 8,727,373,564,884 |
f1b40adf6ab1eaa74d8769f7655a34ffdf5905ee
|
3fa66cef8a6ffe4546b235304d00e8c9266fcbc1
|
/app.py
|
881a6f6d48aed161aa67c8f35ec788f5d9e55447
|
[] |
no_license
|
https://github.com/legendAhsan/commentAnalyzer
|
d25c5b2eb010b95d4030494a42b9a919b28c46d1
|
ef8b7cfa4dff37f9b2b5a631cd9525d882efcb47
|
refs/heads/master
| 2023-02-16T18:37:40.828355 | 2021-01-13T07:39:07 | 2021-01-13T07:39:07 | 324,924,056 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, render_template, request, redirect
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.corpus import stopwords
import string
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
import pickle
hell=Flask(__name__)
model = pickle.load(open('model.pkl','rb'))
cv = pickle.load(open('cv.pkl','rb'))
data=[{'name':'Nokia 3310','price':'$50/-','review':0,'comments':[]},{'name':'samsung 3310','price':'$10/-','review':0,'comments':[]}]
@hell.route('/')
def helloWorld():
return render_template('index.html',data=data)
@hell.route('/about/<mobilenumber>')
def about(mobilenumber):
return render_template('about.html',mobileName=mobilenumber,data=data)
@hell.route('/feedback/<mobileName>', methods=['GET','POST'])
def comment(mobileName):
    original_comment = request.form['post']
    myword = original_comment
stop=set(stopwords.words('english'))
stop.remove('no')
stop.remove('not')
stop.remove('nor')
myword = ' '.join(e for e in myword.split() if e not in stop)
    myword = myword.lower().strip()  # assign back: str methods return new strings
s = SnowballStemmer("english")
p=[]
for word in myword.split():
p.append(s.stem(word))
myword=' '.join(p)
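    # Worked example of the pipeline above (illustrative): "Not a good phone"
    # -> stop-word filter keeps "Not good phone" ('not'/'no'/'nor' are kept on
    # purpose, 'a' is dropped) -> lowercased/stripped -> Snowball-stemmed.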
y= cv.transform([myword])
prediction= model.predict(y)[0]
global data
for msg in data:
if msg['name']==mobileName:
            msg['comments'].append(original_comment)
if prediction==0:
msg['review']-=1
elif prediction==1:
msg['review']+=1
break
data = sorted(data, key=lambda k: k['review'],reverse=True)
return render_template('index.html', data=data)
if __name__ == "__main__":
hell.run(debug=True)
|
UTF-8
|
Python
| false | false | 1,722 |
py
| 8 |
app.py
| 3 | 0.624855 | 0.611498 | 0 | 65 | 25.492308 | 134 |
ben1lam8/USMB-BachelorDIM-Lectures-Algorithms
| 7,791,070,692,326 |
61d2a9bb0f43b012e5071620adc655212df9ebd7
|
1cdfc01aef51ec4872e88fac9347c90ab9483244
|
/assignments/Session1/S1_dice.py
|
9960dc160e0fec6063d98a578cdb6d3c3507a4c0
|
[
"MIT"
] |
permissive
|
https://github.com/ben1lam8/USMB-BachelorDIM-Lectures-Algorithms
|
8bb89ce974ddafe49e33e47b70723be66bbfdcff
|
c7a8905462bdda40cb9d567dfa63de14649def5b
|
refs/heads/master
| 2021-01-23T19:57:41.416899 | 2017-10-31T20:29:59 | 2017-10-31T20:29:59 | 102,835,826 | 0 | 0 | null | true | 2017-09-08T08:06:46 | 2017-09-08T08:06:45 | 2017-08-14T13:32:52 | 2017-09-07T14:44:48 | 15 | 0 | 0 | 0 | null | null | null |
## @namespace S1_dice
# A dice game simulation
#
# @author Benoit Lamit, LPro DIM, IUT Annecy le vieux, FRANCE
import random;
# Global variables
player_score = 0;
computer_score = 0;
turn_count = 0;
## Game main logic
#
def main():
#Init the game
print("========================================================================");
print("=== Yay! Welcome to the Incredible Dice Game ! ===");
print("========================================================================\n");
print("Rules are easy :");
print("- You play against the computer");
print("- You can throw the dice as many times as you want");
print("- Each time, the dice value will be added to your turn score");
print("- If the dice shows a 1, your turn ends and you lose your turn score");
print("- Reach 100 points to win ! GOOD LUCK\n");
#Iterate trough turns until someone wins
while(player_score < 100 and computer_score < 100):
#Init the turn
global turn_count;
turn_count += 1;
print("---------------------------------------------");
print(" ROUND {t} ! ".format(t=turn_count));
print("Player : {p} pts | vs | Computer : {c} pts\n".format(p=player_score, c=computer_score))
print("---------------------------------------------");
#Switch between turns
if turn_count%2 == 1:
player_turn();
else:
computer_turn();
#Quit if someone wins
if player_score >= 100 :
print("YOU WIN !");
print("Congratulations !");
break;
elif computer_score >= 100 :
print("COMPUTER WINS");
print("Sorry. Come back soon... and retry ;)");
break;
    #That's all, Folks!
print("End of the game !");
print("Final scores : ");
print("Player : {p} pts | vs | Computer : {c} pts\n".format(p=player_score, c=computer_score))
print("Goodbye !");
## Handles a player round
#
def player_turn():
print("PLAYER TURN !");
print("-------------");
turn_score = 0;
keep_playing=True;
raw_input("Press ENTER to throw the dice...");
while keep_playing:
dice = random.randint(1, 6);
if dice==1:
print("Too Bad... it's a 1");
turn_score=0;
break;
else:
print("Got a {d}".format(d=dice));
turn_score += dice;
print("Your turn-score is {ts} pts".format(ts=turn_score));
while True:
key = raw_input("Keep playing ? y/n (a 1 will make you lose the turn...");
if key == 'y':
break;
elif key == 'n':
keep_playing = False;
break;
else :
print("I don't understand your answer... ");
raw_input("You won {p} points (press enter to continue)\n".format(p=turn_score));
global player_score;
player_score += turn_score;
## Handles a computer round
#
def computer_turn():
print("COMPUTER TURN !");
print("---------------");
turn_score = 0;
keep_playing=True;
while keep_playing:
print("Throwing the dice...");
dice = random.randint(1, 6);
if dice==1:
print("Too Bad... it's a 1");
turn_score=0;
break;
else:
print("Got a {d}".format(d=dice));
turn_score += dice;
print("Keep playing ? y/n (a 1 will make you lose the turn...")
key = random.choice(['y', 'y', 'y', 'y', 'y', 'n',]);
print("{k}".format(k=key));
if key == 'y':
continue;
elif key == 'n':
keep_playing = False;
break;
raw_input("Computer won {p} points (press enter to continue)\n".format(p=turn_score));
global computer_score;
computer_score += turn_score;
#Launch the game !
main();
|
UTF-8
|
Python
| false | false | 4,210 |
py
| 20 |
S1_dice.py
| 18 | 0.456532 | 0.447743 | 0 | 139 | 29.258993 | 104 |
ayush-sri323/Feedback
| 7,249,904,812,459 |
f73f4a0d6f96c09dd12d00c40f005bbee3aea41a
|
5cfc94c1ae77ae140e639952045fe0454a0263af
|
/review/urls.py
|
ef20497355d40014b2b0c170705c73a0793070eb
|
[] |
no_license
|
https://github.com/ayush-sri323/Feedback
|
b4e65bd9a8dd08bb8978739ce7735f23f41b26e9
|
3c833c44681046ef50b42489f532b0a26b2a3759
|
refs/heads/master
| 2023-06-09T15:01:00.479432 | 2021-07-03T17:44:59 | 2021-07-03T17:44:59 | 382,660,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
    path('', views.ReviewView.as_view()),
    path('thank_you', views.thank_you.as_view()),
    path('Review List', views.ReviewList.as_view(), name='review list'),
    path('favorite', views.AddfavoriteReview.as_view()),
    path('Detail Reviews<int:pk>', views.DetailReview.as_view(), name='Detail review'),
]
|
UTF-8
|
Python
| false | false | 373 |
py
| 7 |
urls.py
| 5 | 0.686327 | 0.686327 | 0 | 10 | 36.3 | 84 |
jun-harashima/msnc
| 2,834,678,443,548 |
807afd48f45fb260c44a5bf5156cd46d5ecf0c8f
|
466218363656c212e29d10ac288fb00818d81adf
|
/msnc/dataset.py
|
66d83e062fd5eebbf86e4dd0176110db844f8956
|
[
"MIT"
] |
permissive
|
https://github.com/jun-harashima/msnc
|
9c791b8a01ff3079791c738bce88d37e8698cb72
|
f6770b183a6bc65f1774deeaee3b326fca326770
|
refs/heads/master
| 2023-01-09T22:51:12.164799 | 2021-06-15T08:44:50 | 2021-06-15T08:44:50 | 187,233,556 | 1 | 1 |
MIT
| false | 2022-12-26T20:47:38 | 2019-05-17T14:45:55 | 2021-06-15T08:44:53 | 2022-12-26T20:47:38 | 86 | 1 | 1 | 2 |
Python
| false | false |
# x: a token (e.g., character, subword, word).
# X: a sequence of tokens.
# X_set: a set of sequences.
# xs: a concatenation of sets.
# y: a number for an example
# ys: numbers for examples
import math
from msnc.util import Util
class Dataset():
def __init__(self, examples, x_to_index=None, isregression=False):
self.util = Util()
self.pad_index = self.util.PAD_INDEX
self.unk_index = self.util.UNK_INDEX
X_sets = [[example['Xs'][i] for example in examples]
for i in range(len(examples[0]['Xs']))]
self.x_to_index = x_to_index
if x_to_index is None:
self.x_to_index = []
for i in range(len(examples[0]['Xs'])):
xs = [x for X in X_sets[i] for x in X]
self.x_to_index.append(self._make_index(xs))
self.Xs = []
self.raw_Xs = [] # for debug
for i in range(len(examples[0]['Xs'])):
self.Xs.append(self._degitize(X_sets[i], self.x_to_index[i]))
self.raw_Xs.append(X_sets[i])
# indices
self.indices = [example['index'] for example in examples]
if isregression:
self.ys = [math.log10(example['y']) for example in examples]
else:
self.ys = [example['y'] for example in examples]
def _make_index(self, xs):
x_to_index = {'<PAD>': self.pad_index, '<UNK>': self.unk_index}
for x in xs:
if x not in x_to_index:
x_to_index[x] = len(x_to_index)
return x_to_index
def _get_index(self, x, x_to_index):
if x not in x_to_index:
return x_to_index['<UNK>']
return x_to_index[x]
def _degitize(self, X_set, x_to_index):
X = []
for _X in X_set:
_X = [self._get_index(x, x_to_index) for x in _X]
X.append(_X)
return X
def split(self, batch_size):
example_num = len(self.Xs[0])
batch_num = int(example_num / batch_size)
batches = [[] for _ in range(batch_num)]
for X_set in self.Xs:
self._append(batches, X_set, batch_size)
self._append(batches, self.ys, batch_size)
return batches
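    # Note: split() keeps only full batches; trailing examples that do not
    # fill a final batch of `batch_size` are silently dropped.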
def _append(self, batches, Z_set, batch_size): # Z_set is X_set or ys
for i in range(len(batches)):
start = batch_size * i
end = batch_size * (i + 1)
batches[i].append(Z_set[start:end])
|
UTF-8
|
Python
| false | false | 2,439 |
py
| 14 |
dataset.py
| 9 | 0.536695 | 0.533825 | 0 | 74 | 31.959459 | 74 |
Rohan-Deshamudre/Smart-traffic-management-system
| 9,337,258,906,605 |
b75dbab737eeae717fc151b2aae36aea78d4ed77
|
3f0f3d2d20581a247bffc1aa36fdd1ff50bca968
|
/backend/api/instruments/instrument_actions/compression.py
|
bdb684555d9adb8dadb0b2a3e161dda6977a8936
|
[] |
no_license
|
https://github.com/Rohan-Deshamudre/Smart-traffic-management-system
|
1c92a4de26c55d103d954c53d0cd207bba68a26b
|
618440303dcbf819cd61aa5e593b4b50483f0070
|
refs/heads/master
| 2022-12-09T14:51:21.206001 | 2020-06-25T16:27:15 | 2020-06-25T16:27:15 | 295,766,737 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from api.instruments.instrument_actions.models import InstrumentAction
from api.road_conditions.road_condition_actions.models import \
RoadConditionActionToInstrumentAction
id_key = 'id'
def to_json_instrument_action(instrument_action: InstrumentAction):
instrument_action_object = {id_key: instrument_action.id}
return instrument_action_object
def import_instrument_action(json_instrument_action, road_condition_action_id):
RoadConditionActionToInstrumentAction(
road_condition_action_id=road_condition_action_id,
instrument_action_id=json_instrument_action[id_key]).save()
|
UTF-8
|
Python
| false | false | 613 |
py
| 330 |
compression.py
| 170 | 0.786297 | 0.786297 | 0 | 17 | 35.058824 | 79 |
lakshmikodi/Python
| 16,260,746,189,749 |
68d23754db466aac2081869c88a69892dcba5cfa
|
e1a98eb88f719108250b8ef31070a72b830f3f98
|
/String Datatypes/Data_Types_String_Example_Methods_1-23.py
|
ff54405c380b2dc4e5046b773784a65d663f2f6d
|
[] |
no_license
|
https://github.com/lakshmikodi/Python
|
bcaf181b751576b5731dd83e991782e86f233a88
|
4346ded89f6b4d747913513ca6d3f0cf930953c5
|
refs/heads/master
| 2020-12-07T15:23:47.676376 | 2017-06-27T02:59:32 | 2017-06-27T02:59:32 | 95,507,980 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
strLower = "a this a is a lower case string"
#0123456789101112131415161718
strLow = "spyder and pycharm"
'''
strUpper = "THIS IS AN UPPER CASE STRING"
numSet = '123'
print(strLower.capitalize())
print(len(strLow))
print(type(strLow))
'''
print (strLow.center(23, '*'),len(strLow))
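# e.g. pads the 18-character string with '*' to a total width of 23 (Python
# places the extra fill character on one side), then prints the length 18.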
'''
print (strLower.count('a',2, 15))
#print (strLower.encode('utf_8', 'strict'))
#print (strLower.encode('cp1252', 'strict'))
#tempStr = "dGhpcyBpcyBhIGxvd2VyIGNhc2Ugc3RyaW5n"
#print (tempStr.decode('utf_8', 'strict'))
#print (tempStr.decode('cp1252', 'strict'))
print (strUpper.endswith('string'))
print (strLower.find('lower'))
print (numSet.isdigit())
'''
|
UTF-8
|
Python
| false | false | 672 |
py
| 18 |
Data_Types_String_Example_Methods_1-23.py
| 17 | 0.690476 | 0.616071 | 0 | 26 | 24.846154 | 49 |
Biking0/test
| 4,363,686,782,748 |
214496c29e468a53af6a50db62496d81652b69b7
|
92eee50b8dcdace89077f31065e56322c9920456
|
/202107/test.py
|
ba5100d17e13f174bc7f09810acdd25bf2c16e70
|
[] |
no_license
|
https://github.com/Biking0/test
|
37878b7c2eb61bf0224459b4a313fadaa437534a
|
3d57193c56bd2c9b6c8ad9c5b3fb785de4a90d9a
|
refs/heads/master
| 2021-07-23T18:43:26.693462 | 2021-07-14T16:06:36 | 2021-07-14T16:06:36 | 125,614,881 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import akshare as ak
stock_circulate_stock_holder_df = ak.stock_circulate_stock_holder(stock="600000")
# print(stock_circulate_stock_holder_df)
print(stock_circulate_stock_holder_df[0:3])
# print(stock_circulate_stock_holder_df[0:22]['占流通股比例(%)'])
new_sum=0
old_sum=0
for i in stock_circulate_stock_holder_df[0:10]['占流通股比例(%)']:
# print(i)
new_sum=new_sum+round(float(i),2)
for j in stock_circulate_stock_holder_df[10:20]['占流通股比例(%)']:
# print(j)
old_sum=old_sum+round(float(j),2)
# print('11',new_sum)
print(old_sum,new_sum,new_sum-old_sum)
|
UTF-8
|
Python
| false | false | 597 |
py
| 150 |
test.py
| 122 | 0.695187 | 0.652406 | 0 | 19 | 28.578947 | 81 |
abs-tudelft/vhdmmio
| 6,614,249,662,022 |
74540e8fdb1ec0caf18ea2f2f8bb171bd9c51121
|
ea97980b8c9dbbf987b2e470cea11cedde8efb9f
|
/vhdmmio/config/interrupt.py
|
544be0503375ced927bad399a4eb39eee7cb3c4e
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/abs-tudelft/vhdmmio
|
634ec13fc9ce573d0628336195bdd1dcece1db1a
|
d0417925cd72dfb973431d6948e65b662a75c5fa
|
refs/heads/master
| 2023-06-22T21:04:01.548057 | 2023-06-19T16:15:11 | 2023-06-19T16:15:11 | 185,991,290 | 5 | 1 |
Apache-2.0
| false | 2023-06-19T16:15:12 | 2019-05-10T13:15:28 | 2023-06-06T14:57:14 | 2023-06-19T16:15:12 | 1,549 | 7 | 2 | 1 |
Python
| false | false |
"""Submodule for `InterruptConfig` configurable."""
import re
from ..configurable import configurable, Configurable, choice, embedded
from .metadata import MetadataConfig
@configurable(name='Interrupt descriptors')
class InterruptConfig(Configurable):
r"""In addition to MMIO, `vhdmmio` can handle interrupt routing for you.
    Each AXI4-lite bus is equipped with an additional signal in the
slave-to-master direction that serves as an interrupt request flag. This
flag is connected to a (masked) wired-or network of any incoming interrupts
you define.
## Behavior
The interrupts can be monitored and controlled through fields with the
    [`interrupt`](interrupt.md) behavior.
There are up to three internal registers for each interrupt, named `enab`
(short for enable), `flag`, and `umsk` (short for unmask). `enab` controls
whether incoming interrupts are passed on to the flag register. The flag
register stores whether the interrupt is pending regardless of whether it
is enabled; if an interrupt comes in while the interrupt is enabled, and
the interrupt is then disabled, the flag remains asserted until it is
explicitly cleared (usually by an interrupt handler). `umsk` (unmask) has a
similar function, but is placed after the flag register. Thus, masking an
interrupt immediately stops it from being requested, but once the interrupt
is unmasked again, it will be requested again. This logic is shown
schematically below.
```
.--[raw>
| ____ flag
IRQ --------o--------| \ _____ .----. .-[flag>
| )----\ \ |> | | ____
.--|____/ ) )---|S Q|---o--------| \ to
enab | [pend>-/____/ .-|R | umsk | )--> wired
.----. | [clear>--------' '----' .----. .--|____/ OR
|> | | |> | |
[enable>--|S Q|--o--[enabled> [unmask>-|S Q|--o--[unmasked>
[disable>-|R | [mask>---|R |
'----' '----'
```
    Each of the three registers is accessible in read, write, set, and clear
    modes through fields with [`interrupt`](interrupt.md) behavior. The raw
incoming interrupt signal and the masked output signal of an interrupt can
also be monitored directly.
Interrupts can be made level-sensitive by not specifying a way to clear the
interrupt. In this case, the logic is automatically simplified to the
following.
```
.--[raw>
| ____ .-[flag>
IRQ --------o--------| \ | ____
| )---------------o--------| \ to
.--|____/ | )--> wired
enab | umsk .--|____/ OR
.----. | .----. |
|> | | |> | |
[enable>--|S Q|--o--[enabled> [unmask>-|S Q|--o--[unmasked>
[disable>-|R | [mask>---|R |
'----' '----'
```
Furthermore, if there is no way to enable/unmask an interrupt, the
respective AND gate and the register is effectively optimized away. If
there *is* a way, the reset state is disabled/masked.
## Interrupt sources
A `vhdmmio` interrupt can currently be requested through an internal or
synchronous external signal, or by software using the
[`interrupt-pend`](interruptpend.md) field behavior. An external
synchronizer is needed to accept asynchronous interrupts. These are often
vendor-specific, therefore they are not included in vhdmmio."""
#pylint: disable=E0211,E0213,E0202
@choice
def repeat():
"""This value specifies whether this interrupt descriptor describes a
single interrupt or an array of interrupts."""
yield None, 'the descriptor describes a single interrupt.'
yield (1, None), ('the descriptor describes an array of interrupts of '
'the given size.')
@embedded
def metadata():
"""This configuration structure is used to name and document the
interrupt."""
return MetadataConfig
@choice
def internal():
"""This key specifies whether the interrupt is requested by an internal
or external signal."""
yield None, 'the interrupt request source is an input port.'
yield (re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'),
'the interrupt request source is the internal signal with the '
'given name. The arrayness of the signal must match this '
'interrupt\'s repetition. Level-sensitive interrupts cannot be '
'associated with strobe signals.')
@choice
def active():
"""This key specifies the event that the interrupt is sensitive to."""
yield 'high', 'the interrupt is level/strobe-sensitive, active-high.'
yield 'low', 'the interrupt is level/strobe-sensitive, active-low.'
yield 'rising', 'the interrupt is rising-edge sensitive.'
yield 'falling', 'the interrupt is falling-edge sensitive.'
yield 'edge', 'the interrupt is sensitive to any edge.'
@choice
def group():
"""The interrupt request port for the internal signal can optionally be
grouped along with other ports in a record. This key specifies the name
of the group record."""
yield None, 'port grouping is determined by the global default.'
yield False, 'the port is not grouped in a record.'
yield (re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'),
'the port is grouped in a record with the specified name.')
|
UTF-8
|
Python
| false | false | 5,947 |
py
| 185 |
interrupt.py
| 118 | 0.565663 | 0.562637 | 0 | 124 | 46.959677 | 80 |
Sahel95/messenger-backend
| 15,771,119,936,875 |
3e2e498c823c901a2f7b30239879dc8801894082
|
beb5cdab2ec52c5cc53fd82bc9d700e268d2a2d7
|
/user/models.py
|
beaf9d50404c08c7df8cbb836adadd35c45aadc7
|
[] |
no_license
|
https://github.com/Sahel95/messenger-backend
|
d30e02690927e3e15ed9e7ab4282f83f28daf01b
|
db9b0ce88163969c343b91c465cadeed828c2e33
|
refs/heads/master
| 2020-07-09T03:22:00.319634 | 2019-09-17T06:06:17 | 2019-09-17T06:06:17 | 203,861,256 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
import uuid
class Users(AbstractUser):
verificationtoken = models.UUIDField(
default=uuid.uuid4,
null=True
)
is_verified = models.BooleanField(
null=True,
default=False
)
# token= models.CharField(
# null=True,
# max_length=200
# )
profile_pic = models.ImageField(
upload_to='profile-pictures',
null=True,
blank=True
)
contacts = models.ManyToManyField(
'self',
related_name='contactlist',
default=None
)
def __str__(self):
return self.first_name + ' ' + self.last_name
# class ContactList(models.Model):
# user_id = models.ForeignKey(
# Users,
# on_delete=models.CASCADE,
# related_name='name'
# )
# members = models.ManyToManyField(Users, related_name='contactlist')
#
# def __str__(self):
# return self.user_id
|
UTF-8
|
Python
| false | false | 1,043 |
py
| 36 |
models.py
| 34 | 0.601151 | 0.597315 | 0 | 44 | 22.704545 | 73 |
MysticSoul/Codes
| 4,999,341,970,830 |
cda944273310ebd4950c6f90179bb7ac6ae3a4c1
|
deb07632f77383bea0a79bc1f62924b5db72fd1f
|
/python/learning.py
|
795c538c01e62fdd822e49419363f0ec6d81cddb
|
[] |
no_license
|
https://github.com/MysticSoul/Codes
|
e559677e5badd4504794ebc38c2309318dc35c5d
|
c61d1206b7cd42ac9d2d8dab38d4653ce129c9b3
|
refs/heads/master
| 2020-07-24T07:58:08.943033 | 2019-09-11T09:18:03 | 2019-09-11T09:18:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#learning loops
#n = input("Enter the length of the array : ")
a = [1, 2, 3, 4, 5, 6, 7, 8]
print("Prefixes of", a)
for i in range(0, len(a) + 1):
print(a[0:i])
print("----------------------------------")
print("Suffixes of" , a)
for i in range(0, len(a) + 1):
    print(a[i:])
|
UTF-8
|
Python
| false | false | 290 |
py
| 146 |
learning.py
| 128 | 0.493103 | 0.444828 | 0 | 10 | 28.1 | 47 |
avenrainbow/aven_su
| 3,822,520,939,828 |
6f1a42f88ca680a7a7875b0e7598cc025aee4e26
|
536ea383510428e49c3304fd957e4ae27fa13f80
|
/com.aven.jdk.python/test/RestHandlerTest.py
|
99a44b2b493f1fe4ca8a5635576fd560f35eded3
|
[] |
no_license
|
https://github.com/avenrainbow/aven_su
|
716e3d0d77f607cb481c5e5c1bef45239d7bc35c
|
f985123a164d5c2f8cfd5deef26d37c60ac9eae3
|
refs/heads/master
| 2016-09-06T10:07:13.539963 | 2015-07-30T07:55:45 | 2015-07-30T07:55:45 | 39,938,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding=utf8
'''
Created on 2014-4-10
@author: Kee.Li
'''
from com.huadi.winserver.RestHandler import DataConnect
from com.huadi.winserver import RestHandler
'''
Demo: calling the REST API with GET
Fetch all virtual machines
'''
def testGet():
    # connection parameters
dataConnect = DataConnect()
dataConnect.ip = "10.0.0.100"
dataConnect.port = "8080"
dataConnect.account = "admin"
dataConnect.password = "passw0rd"
apiKey = "wce/api/vms"
params = "name=服务器"
placeholder = ""
data = ""
RestHandler.getRest(dataConnect, apiKey, params, placeholder, data);
'''
Demo: calling the REST API with PUT
Power a virtual machine on or off
'''
def testPut():
    # connection parameters
dataConnect = DataConnect()
dataConnect.ip = "10.0.0.100"
dataConnect.port = "8080"
dataConnect.account = "admin"
dataConnect.password = "passw0rd"
apiKey = "wce/api/vms/{0}"
params = ""
placeholder = "449"
data = "{'state':'OK'}"
RestHandler.putRest(dataConnect, apiKey, params, placeholder, data);
'''
Demo: calling the REST API with DELETE
Delete a virtual machine
'''
def testDelete():
    # connection parameters
dataConnect = DataConnect()
dataConnect.ip = "10.0.0.100"
dataConnect.port = "8080"
dataConnect.account = "admin"
dataConnect.password = "passw0rd"
apiKey = "wce/api/vms/{0}"
params = ""
placeholder = "515"
data = ""
RestHandler.deleteRest(dataConnect, apiKey, params, placeholder, data);
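'''
Demo: calling the REST API with POST
Create a new virtual machine (see the request body below)
'''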
def testPost():
    # connection parameters
dataConnect = DataConnect()
dataConnect.ip = "10.0.0.100"
dataConnect.port = "8080"
dataConnect.account = "admin"
dataConnect.password = "passw0rd"
apiKey = "wce/api/vms"
params = ""
placeholder = ""
data = '{"name":"kee_vm_中文","targetId":"17","state":"EXECUTING","targetType":"host","vcpu":"1","cpu":0.3,"memory":1024,"disk":"8","storagePoolId":"91","ip":"192.168.100.17","templateId":"169","vnetConfig":{ "vnetwokrId":"171", "gateway":"192.168.100.1", "netmask":"255.255.255.0", "qos":""},"customization": {"cpuWeight":384}}'
RestHandler.postRest(dataConnect, apiKey, params, placeholder, data);
if __name__ == '__main__':
testGet()
#testPut()
#testDelete()
#testPost()
|
UTF-8
|
Python
| false | false | 2,315 |
py
| 33 |
RestHandlerTest.py
| 23 | 0.601898 | 0.54948 | 0 | 98 | 21.489796 | 346 |
scottyuecao/RIXS-utilities
| 15,307,263,477,441 |
0db4b758a971a784966ac6bd3714de531c3a1510
|
e1b999ea5a734ce2cd7c39be08bcbc243216ba2b
|
/SLS_examples/get_plot_Emaps.py
|
58d0f7985aab3c472f26f1190c2fe292162c99cd
|
[] |
no_license
|
https://github.com/scottyuecao/RIXS-utilities
|
870f279e2fdbf97f47a79f09fa634c7ea4cd43cd
|
1a20198f27084351c06aba7c2b640503ce02a9b1
|
refs/heads/master
| 2021-01-24T02:18:10.415188 | 2015-03-17T19:48:50 | 2015-03-17T19:48:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.interpolate
plt.ion()
def getParam(filename, param):
    fid = open(filename)
    alltxt = fid.read().lower() # everything in lowercase
    fid.close()
    param = param.lower()
    for line in alltxt.splitlines():
        if line.find(param) != -1:
            variable, valuestr = line.split(' = ')
            try:
                return float(valuestr)
            except ValueError:
                # value after ' = ' was not numeric; keep scanning
                pass
    print "{} not found in file {}".format(param, filename)
def getM(scan_list, base_name, eloss, ein, IDPolar_val):
epoints = []
for scanno in scan_list:
filename = '{}{:04d}.dat'.format(base_name, scanno)
if getParam(filename, 'idpolar') == IDPolar_val:
epoints.append(getParam(filename, 'monoenergy'))
epoints = np.array(epoints)
print epoints
M_at_epoints = np.zeros((len(eloss),len(epoints)))
i = 0
for scanno in scan_list:
filename = '{}{:04d}.dat'.format(base_name, scanno)
if getParam(filename, 'idpolar') == IDPolar_val:
print filename
data = np.loadtxt(filename)
plt.plot(-1*data[:,0], data[:,1], label=filename[-8:-4])
f = scipy.interpolate.interp1d(-1*data[:,0], data[:,1], kind='linear', bounds_error=True, fill_value=np.nan) # last two added MPMD
M_at_epoints[:,i] = f(eloss)
i += 1
f = scipy.interpolate.interp2d(epoints, eloss, M_at_epoints, kind='linear', bounds_error=False, fill_value=np.nan)
M = f(ein, eloss)
return M
#return M, M_at_epoints, epoints
# vectors to interpolate on
eloss = np.linspace(-1, 8, 900)
ein = np.arange(851, 857.3, 0.1)
# LTNAO
#scan_list = range(412,467+1)
scan_list = range(430, 435)
base_name = '../RIXS/LTNAO_L3_l2/LTNAO_'
IDPolar_val = 0.0 # LH
print "############ RA_LH ###############"
plt.clf()
M_LTNAO_LH = getM(scan_list, base_name, eloss, ein, IDPolar_val)
plt.show()
print "############ RA_LV ###############"
IDPolar_val = 1.0 # LV
M_LTNAO_LV = getM(scan_list, base_name, eloss, ein, IDPolar_val)
plt.legend()
|
UTF-8
|
Python
| false | false | 2,087 |
py
| 3 |
get_plot_Emaps.py
| 3 | 0.598467 | 0.574509 | 0 | 66 | 30.621212 | 142 |
huawei-noah/HEBO
| 9,337,258,921,341 |
53d18340a0133c28c077e9c06eda0117b26461c2
|
5072e65b8b10b333fff2142bfc70896d4abca2b4
|
/HEBO/archived_submissions/hebo/bo/models/scalers.py
|
e0b75e69037bdd94dc2803b788c73130d2e5c12b
|
[
"MIT"
] |
permissive
|
https://github.com/huawei-noah/HEBO
|
a0ececc2fe9812fd319d8ff4af1d5706276937bc
|
2bccb4edef79711ff81d660b80c9121b1bb95c82
|
refs/heads/master
| 2023-09-01T08:14:57.603747 | 2023-08-30T15:39:48 | 2023-08-30T15:39:48 | 404,973,348 | 1,748 | 313 | null | false | 2023-08-23T20:36:22 | 2021-09-10T06:02:05 | 2023-08-23T18:35:20 | 2023-08-23T20:36:21 | 152,176 | 1,882 | 362 | 10 |
Jupyter Notebook
| false | false |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify it under
# the terms of the MIT license.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
import sys
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
class TorchIdentityScaler(nn.Module):
def __init__(self):
super().__init__()
def fit(self, x : torch.FloatTensor):
return self
def forward(self, x : torch.FloatTensor) -> torch.FloatTensor:
return x
def transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return self.forward(x)
def inverse_transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return x
class TorchStandardScaler(nn.Module):
def __init__(self):
super().__init__()
self.mean = None
self.std = None
self.fitted = False
def fit(self, x : torch.FloatTensor):
assert(x.dim() == 2)
with torch.no_grad():
scaler = StandardScaler()
scaler.fit(x.detach().numpy())
self.mean = torch.FloatTensor(scaler.mean_.copy()).view(-1)
self.std = torch.FloatTensor(scaler.scale_.copy()).view(-1)
invalid = ~(torch.isfinite(self.mean) & torch.isfinite(self.std))
            self.mean[invalid] = 0. # sometimes we face data with some all-NaN columns
            self.std[invalid] = 1.
        self.fitted = True
        return self
def forward(self, x : torch.FloatTensor) -> torch.FloatTensor:
return (x - self.mean) / self.std
def transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return self.forward(x)
def inverse_transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return x * self.std + self.mean
class TorchMinMaxScaler(nn.Module):
def __init__(self, range : tuple = (0, 1)):
super().__init__()
self.range_lb = range[0]
self.range_ub = range[1]
self.data_lb = None
self.data_ub = None
self.fitted = False
def fit(self, x : torch.FloatTensor):
assert(x.dim() == 2)
with torch.no_grad():
self.data_lb = x.min(dim = 0).values.detach().clone()
self.data_ub = x.max(dim = 0).values.detach().clone()
self.fitted = True
assert(torch.isfinite(self.data_lb).all())
assert(torch.isfinite(self.data_ub).all())
return self
def to_unit(self, x, lb, ub):
return (x - lb) / (ub - lb)
def forward(self, x : torch.FloatTensor) -> torch.FloatTensor:
return self.to_unit(x, self.data_lb, self.data_ub) * (self.range_ub - self.range_lb) + self.range_lb
def transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return self.forward(x)
def inverse_transform(self, x : torch.FloatTensor) -> torch.FloatTensor:
return self.to_unit(x, self.range_lb, self.range_ub) * (self.data_ub - self.data_lb) + self.data_lb
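# Usage sketch (illustrative):
# x = torch.randn(100, 3)
# scaler = TorchStandardScaler().fit(x)
# z = scaler.transform(x) # ~zero mean / unit std per column
# x_rec = scaler.inverse_transform(z) # recovers x up to float error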
|
UTF-8
|
Python
| false | false | 3,355 |
py
| 747 |
scalers.py
| 633 | 0.604471 | 0.599702 | 0 | 91 | 34.868132 | 108 |
tomasra/venus_volcanoes
| 13,932,873,941,191 |
a1c6f883b9f734febbb3dd704c65acae373bd121
|
da1843a1ededfd6f3470965f0024391912c1e299
|
/lib/experiments/__init__.py
|
b453c20f2a7f62b3e6f76034404cfbf51c7532c7
|
[] |
no_license
|
https://github.com/tomasra/venus_volcanoes
|
6cda36be507690ad17b4c2c7ead87f08de90765c
|
1db7c8b46db25c9f158dfa96a8d22fde4e12c43d
|
refs/heads/master
| 2020-05-31T20:51:29.821742 | 2014-05-24T11:37:32 | 2014-05-24T11:37:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from experiment import Experiment
from experiment_set import ExperimentSet
|
UTF-8
|
Python
| false | false | 74 |
py
| 32 |
__init__.py
| 26 | 0.891892 | 0.891892 | 0 | 2 | 36.5 | 40 |
jarekzha/JingDongSpider
| 2,748,779,076,708 |
44d8aeef82eb5b96fd1ea5c95154adb40b6ee311
|
baf93a6c55b9784d0ffe61410e5728bcb778e671
|
/Crawler/crawler.py
|
e696fb22a76b89aa9c1220cae15e1f70352826c2
|
[] |
no_license
|
https://github.com/jarekzha/JingDongSpider
|
a766c9a04aae35e1995ecd899e0fff52e2d8a9ac
|
a37a4299f1a23fd9d5303fa56e5bdb4259b5f86d
|
refs/heads/master
| 2021-01-21T11:38:00.422899 | 2016-05-12T15:03:31 | 2016-05-12T15:03:33 | 48,053,324 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2016-04-09
@author: jarekzhang
'''
import config
from jdItemPage import JDItemPage
from mysqlSaver import MysqlSaver
import codecs
import time
if __name__ == '__main__':
    ''' parse the item page '''
itemPage = JDItemPage(config.URL)
itemPage.parsePage()
    ''' write the log '''
with codecs.open(config.LOG_FILE, 'a', 'utf-8') as logFile:
infoStr = u'%s itemID:%s, itemName:%s, price:%s, inStock:%s \n' % (time.ctime(), itemPage.itemID,
itemPage.itemName, itemPage.price, itemPage.inStock)
logFile.write(infoStr)
saver = MysqlSaver()
saver.execute(itemPage)
|
UTF-8
|
Python
| false | false | 687 |
py
| 10 |
crawler.py
| 9 | 0.61919 | 0.607196 | 0 | 27 | 23.481481 | 106 |
DavidGG66/active-nlp
| 10,428,180,633,499 |
615431716ee5444782bfbc9a0f6989c95533d7fc
|
8a4cf805b293fab0c0c844794a1b8f1688bff264
|
/src/network/node.py
|
0d5b444911201e2e9fd82f447d553be16b6c4d09
|
[] |
no_license
|
https://github.com/DavidGG66/active-nlp
|
906b18e37a68c92d508c0a446c5b65fcd9045751
|
508beec00eca482319d8f7f1e972e0e6b09fd158
|
refs/heads/master
| 2020-09-01T07:25:07.601575 | 2020-03-14T00:34:58 | 2020-03-14T00:34:58 | 218,907,862 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Parser nodes and networks
class Error(Exception):
    pass
class SynCatError(Error):
    # raised when an analysis is missing its syntactic category
    def __init__(self, syncat, message):
        self.syncat = syncat
        self.message = message
class SynCat():
def __init__(self):
self.cat = None
self.features = {}
class LexCat(SynCat):
def __init__(self):
SynCat.__init__(self)
self.lex = {}
class PhraseCat(SynCat):
def __init__(self):
SynCat.__init__(self)
self.filled = []
self.open = []
class Node():
def __init__(self):
self.activation = None
self.range = None
self.parents = []
self.__analysis = None
self.operations = []
def add_analysis(self, analysis):
        if analysis.cat is None:
raise SynCatError(analysis, "No category set")
self.__analysis = analysis
def get_analysis(self):
return self.__analysis
class NodeError(Error):
def __init__(self, node, message):
        self.node = node
self.message = message
class Network():
def __init__(self):
self.__next_node = 1
self.__nodes = {}
def add_node(self, node):
        if node.activation is None:
            raise NodeError(node, "No activation set")
        elif node.range is None:
            raise NodeError(node, "No range set")
        elif node.get_analysis() is None:
            raise NodeError(node, "No analysis set")
        self.__nodes[self.__next_node] = node
        self.__next_node += 1
def get_node(self, idx):
return self.__nodes[idx]
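# Minimal usage sketch (hypothetical values):
# cat = PhraseCat(); cat.cat = 'NP'
# node = Node(); node.activation = 1.0; node.range = (0, 2)
# node.add_analysis(cat)
# net = Network(); net.add_node(node)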
|
UTF-8
|
Python
| false | false | 1,462 |
py
| 43 |
node.py
| 25 | 0.53762 | 0.536252 | 0 | 67 | 20.820896 | 58 |
ericfp87/Day-26-exercise3
| 13,194,139,576,574 |
14a66cef35a40c4fefaa47db3b931e50c0a2e1f8
|
e09416565a0a7825021be242ef2597d009b0c319
|
/main.py
|
85ded4487d29f972d222c84de9e71ebc5de580a4
|
[] |
no_license
|
https://github.com/ericfp87/Day-26-exercise3
|
cd6bb7e74155a554ff6ec62c8c6a4f40ac5a3b92
|
c8f7bc0c144a97790eac922cfa0e5f650729d66b
|
refs/heads/master
| 2023-03-18T10:45:08.222936 | 2021-03-08T13:17:14 | 2021-03-08T13:17:14 | 345,661,839 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
with open("file1.txt") as file1:
list1 = file1.readlines()
with open("file2.txt") as file2:
list2 = file2.readlines()
result = [int(number) for number in list1 if number in list2]
# Write your code above 👆
print(result)
|
UTF-8
|
Python
| false | false | 236 |
py
| 1 |
main.py
| 1 | 0.682403 | 0.639485 | 0 | 11 | 20 | 60 |
vene/ambra
| 4,827,543,255,159 |
91938ea1184225b740711bebd1621a2516158975
|
0b517fd0ca467fdfccf1ba1f32bd652ae95349c2
|
/ambra/features.py
|
4a8047954788c4f6e683fa5197b83b302d4938d5
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/vene/ambra
|
520e721eeeae8a44e38fff241b9a738519f2e33e
|
e220dbb332b86ed60ecb025efbefbb33080118b7
|
refs/heads/master
| 2016-09-06T12:42:34.633206 | 2015-01-30T21:40:31 | 2015-01-30T21:40:31 | 27,673,648 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class LengthFeatures(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def _doc_features(self, doc):
n_sents = len(doc)
all_toks = [tok for sent in doc for tok in sent]
n_tokens = len(all_toks)
n_types = len(set(all_toks))
#type_token_ratio = n_tokens / float(n_types)
return np.array([n_sents, n_tokens, n_types,
#type_token_ratio
],
dtype=np.float)
def transform(self, X, y=None):
#x in X is a list of sents
return np.row_stack([self._doc_features(doc) for doc in X])
def get_feature_names(self):
return ['n_sents', 'n_tokens', 'n_types',
#'type_token_ratio'
]
class StylisticFeatures(BaseEstimator, TransformerMixin):
def __init__(self, lower=True):
self.lower = lower
def fit(self, X, y=None):
return self
def _doc_features(self, doc):
# doc is a dict
tokens = doc['tokens']
lemmas = doc['lemmas']
all_tokens = [w.lower() if self.lower else w
for sent in tokens for w in sent]
all_lemmas = [w.lower() if self.lower else w
for sent in lemmas for w in sent]
avg_sent_len = np.mean([len(sent) for sent in tokens])
        avg_word_len = np.mean([len(w) for w in all_tokens])
        lex_density = len(set(all_tokens)) / float(len(all_tokens))
        lex_richness = len(set(all_lemmas)) / float(len(all_lemmas))
return np.array([avg_sent_len, avg_word_len, lex_density, lex_richness],
dtype=np.float)
def transform(self, X, y=None):
#x in X is a list of sents
return np.row_stack([self._doc_features(doc) for doc in X])
def get_feature_names(self):
return ['ASL', 'AWL', 'LD', 'LR']
class NgramLolAnalyzer(BaseEstimator):
"""Analyzer for pre-tokenized list-of-lists sentences-words structures"""
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
if self.lower:
tokens = [w.lower() for w in tokens]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def __init__(self, ngram_range=(1,1), lower=False):
self.ngram_range=ngram_range
self.lower = lower
def __call__(self, doc):
return [feature for sentence in doc
for feature in self._word_ngrams(sentence)]
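# Example (illustrative):
# NgramLolAnalyzer(ngram_range=(1, 2))([['a', 'b'], ['c']])
# returns ['a', 'b', 'a b', 'c'].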
|
UTF-8
|
Python
| false | false | 3,114 |
py
| 15 |
features.py
| 14 | 0.560694 | 0.558767 | 0 | 86 | 35.209302 | 80 |
xieguotian/caffe
| 16,295,105,957,126 |
bea8cac2f7c0e89948272e2b110e8a391df0392c
|
11d564f26a40d5b5da034568ff4bf3ffec8b94db
|
/python/convert_model_triplet.py
|
b6774cf29fcd1579d08c210ba0a9cb046e7d043a
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
https://github.com/xieguotian/caffe
|
110673db35c41360a5ecdbec6e64a8d2cda31784
|
fed8340608102c489ed6a1e61f2890cf0b0079ea
|
refs/heads/master
| 2020-04-04T04:23:01.004276 | 2017-11-23T07:34:20 | 2017-11-23T07:34:33 | 38,590,922 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import os
if len(sys.argv) < 3:
    print 'Usage: python convert_model.py net_proto net_posfix'
    sys.exit(1)
net_proto = sys.argv[1]
pos_fix = sys.argv[2]
base_name = os.path.basename(net_proto).strip().split('.')
net_new_proto = base_name[0]+'_'+pos_fix+'.'+base_name[1]
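# e.g. "python convert_model_triplet.py net.prototxt tri" writes
# net_tri.prototxt containing three copies of the net whose corresponding
# layers share weights through the generated param names w0, w1, ...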
with open(net_new_proto,'w') as fout:
with open(net_proto) as fid:
count = 0
print >>fout,"################ net 1 ############"
for line in fid:
print >> fout,line.strip('\n')
if 'lr_mult' in line:
print >>fout,'name: \"w%d\"'%(count)
count+=1
with open(net_proto) as fid:
print >>fout
print >>fout,"################ net 2 ############"
print >>fout
count = 0
for line in fid:
line = line.strip('\n')
if 'bottom' in line or 'top' in line or 'name' in line:
if not 'data' in line:
pos = line.rfind('\"')
line = line[:pos] + '_' + pos_fix +'1' +line[pos:]
if 'lr_mult' in line:
print >>fout,'name: \"w%d\"'%(count)
count+=1
print >>fout,line
with open(net_proto) as fid:
print >>fout
print >>fout,"################ net 2 ############"
print >>fout
count = 0
for line in fid:
line = line.strip('\n')
if 'bottom' in line or 'top' in line or 'name' in line:
if not 'data' in line:
pos = line.rfind('\"')
line = line[:pos] + '_' + pos_fix +'2' +line[pos:]
if 'lr_mult' in line:
print >>fout,'name: \"w%d\"'%(count)
count+=1
print >>fout,line
#net = caffe.Net(net_proto,net_param,caffe.TEST)
#net2 = caffe.Net(net_new_proto,caffe.TEST)
#for name,param in net.params.items():
# print 'copy param '+name
# new_name =name+'_' + pos_fix
# for ix,sub_param in enumerate(param):
# net2.params[new_name][ix].data[:] = sub_param.data.copy()
#base_name = os.path.basename(net_param).strip().split('.')
#net_new_param = base_name[0]+'_'+pos_fix+'.'+base_name[1]
#net2.save(net_new_param)
|
UTF-8
|
Python
| false | false | 2,210 |
py
| 161 |
convert_model_triplet.py
| 110 | 0.481448 | 0.471946 | 0 | 65 | 32.984615 | 70 |
Podidiving/catalyst
| 4,664,334,529,492 |
441751de0a52199f9b7575aee1c3da512e669fc7
|
9b6bc3f768ec86caf2074141be90262c9662762a
|
/catalyst/loggers/tensorboard.py
|
1b777aa4c201da32fa79c54bab9ec15c32fa80f5
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/Podidiving/catalyst
|
298dca23e5cf51dda6bbc0a744874ae2c8787bc5
|
ac8567dc389fb7a265e3104e8a743497aa903165
|
refs/heads/master
| 2021-12-03T21:48:48.232619 | 2021-11-03T05:29:19 | 2021-11-03T05:29:19 | 225,822,776 | 2 | 0 |
Apache-2.0
| true | 2019-12-04T08:59:03 | 2019-12-04T08:59:03 | 2019-12-04T08:30:59 | 2019-12-04T08:42:44 | 11,052 | 0 | 0 | 0 | null | false | false |
from typing import Dict
import os
import numpy as np
from tensorboardX import SummaryWriter
from catalyst.core.logger import ILogger
from catalyst.loggers.functional import image_to_tensor
class TensorboardLogger(ILogger):
"""Tensorboard logger for parameters, metrics, images and other artifacts.
Args:
logdir: path to logdir for tensorboard
use_logdir_postfix: boolean flag to use extra ``tensorboard`` prefix in the logdir
.. note::
This logger is used by default by ``dl.Runner`` and ``dl.SupervisedRunner`` in case of
specified logdir during ``runner.train(..., logdir=/path/to/logdir)``.
.. note::
This logger is used by default by ``dl.ConfigRunner`` and ``dl.HydraRunner`` in case of
specified logdir in config ``args``.
Notebook API examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...,
loggers={"tensorboard": dl.TensorboardLogger(logdir="./logdir/tensorboard"}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"tensorboard": dl.TensorboardLogger(logdir="./logdir/tensorboard")
}
# ...
runner = CustomRunner().run()
Config API example:
.. code-block:: yaml
loggers:
tensorboard:
_target_: TensorboardLogger
logdir: ./logdir/tensorboard
...
Hydra API example:
.. code-block:: yaml
loggers:
tensorboard:
_target_: catalyst.dl.TensorboardLogger
logdir: ./logdir/tensorboard
...
"""
def __init__(self, logdir: str, use_logdir_postfix: bool = False):
"""Init."""
if use_logdir_postfix:
logdir = os.path.join(logdir, "tensorboard")
self.logdir = logdir
self.loggers = {}
os.makedirs(self.logdir, exist_ok=True)
def _check_loader_key(self, loader_key: str):
if loader_key not in self.loggers.keys():
logdir = os.path.join(self.logdir, f"{loader_key}")
self.loggers[loader_key] = SummaryWriter(logdir)
def _log_metrics(self, metrics: Dict[str, float], step: int, loader_key: str, suffix=""):
for key, value in metrics.items():
self.loggers[loader_key].add_scalar(f"{key}{suffix}", float(value), step)
def log_metrics(
self,
metrics: Dict[str, float],
scope: str = None,
# experiment info
run_key: str = None,
global_epoch_step: int = 0,
global_batch_step: int = 0,
global_sample_step: int = 0,
# stage info
stage_key: str = None,
stage_epoch_len: int = 0,
stage_epoch_step: int = 0,
stage_batch_step: int = 0,
stage_sample_step: int = 0,
# loader info
loader_key: str = None,
loader_batch_len: int = 0,
loader_sample_len: int = 0,
loader_batch_step: int = 0,
loader_sample_step: int = 0,
) -> None:
"""Logs batch and epoch metrics to Tensorboard."""
if scope == "batch":
self._check_loader_key(loader_key=loader_key)
# metrics = {k: float(v) for k, v in metrics.items()}
self._log_metrics(
metrics=metrics, step=global_sample_step, loader_key=loader_key, suffix="/batch"
)
elif scope == "loader":
self._check_loader_key(loader_key=loader_key)
self._log_metrics(
metrics=metrics, step=global_epoch_step, loader_key=loader_key, suffix="/epoch"
)
elif scope == "epoch":
# @TODO: remove naming magic
loader_key = "_epoch_"
per_loader_metrics = metrics[loader_key]
self._check_loader_key(loader_key=loader_key)
self._log_metrics(
metrics=per_loader_metrics,
step=global_epoch_step,
loader_key=loader_key,
suffix="/epoch",
)
def log_image(
self,
tag: str,
image: np.ndarray,
scope: str = None,
# experiment info
run_key: str = None,
global_epoch_step: int = 0,
global_batch_step: int = 0,
global_sample_step: int = 0,
# stage info
stage_key: str = None,
stage_epoch_len: int = 0,
stage_epoch_step: int = 0,
stage_batch_step: int = 0,
stage_sample_step: int = 0,
# loader info
loader_key: str = None,
loader_batch_len: int = 0,
loader_sample_len: int = 0,
loader_batch_step: int = 0,
loader_sample_step: int = 0,
) -> None:
"""Logs image to Tensorboard for current scope on current step."""
assert loader_key is not None
self._check_loader_key(loader_key=loader_key)
tensor = image_to_tensor(image)
self.loggers[loader_key].add_image(f"{tag}/{scope}", tensor, global_step=global_epoch_step)
def flush_log(self) -> None:
"""Flushes the loggers."""
for logger in self.loggers.values():
logger.flush()
def close_log(self, scope: str = None) -> None:
"""Closes the loggers."""
if scope is None or scope == "experiment":
for logger in self.loggers.values():
logger.close()
__all__ = ["TensorboardLogger"]
|
UTF-8
|
Python
| false | false | 5,650 |
py
| 161 |
tensorboard.py
| 126 | 0.554159 | 0.550265 | 0 | 181 | 30.21547 | 99 |
ethanjperez/convince
| 6,940,667,152,806 |
9a49b71e7a8644514247d500f9f27ce54bcb7b30
|
79e1d04867c4298b23c907f92c7119e4bea8ef02
|
/allennlp/allennlp/modules/text_field_embedders/basic_text_field_embedder.py
|
a4e1fd2830dd711054aa69b3beaf69969601ee01
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/ethanjperez/convince
|
53db0bcd978831799c68fe63ecb0c91473ec40c4
|
ccf60824b28f0ce8ceda44a7ce52a0d117669115
|
refs/heads/master
| 2023-01-08T09:12:16.722614 | 2021-11-03T18:50:30 | 2021-11-03T18:50:30 | 205,189,291 | 27 | 8 |
Apache-2.0
| false | 2023-01-05T22:43:12 | 2019-08-29T15:03:34 | 2021-11-03T18:51:00 | 2023-01-05T22:43:07 | 108,155 | 24 | 6 | 7 |
Python
| false | false |
from typing import Dict, List
import warnings
import torch
from overrides import overrides
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TextFieldEmbedder.register("basic")
class BasicTextFieldEmbedder(TextFieldEmbedder):
"""
This is a ``TextFieldEmbedder`` that wraps a collection of :class:`TokenEmbedder` objects. Each
``TokenEmbedder`` embeds or encodes the representation output from one
:class:`~allennlp.data.TokenIndexer`. As the data produced by a
:class:`~allennlp.data.fields.TextField` is a dictionary mapping names to these
representations, we take ``TokenEmbedders`` with corresponding names. Each ``TokenEmbedders``
embeds its input, and the result is concatenated in an arbitrary order.
Parameters
----------
token_embedders : ``Dict[str, TokenEmbedder]``, required.
A dictionary mapping token embedder names to implementations.
These names should match the corresponding indexer used to generate
the tensor passed to the TokenEmbedder.
embedder_to_indexer_map : ``Dict[str, List[str]]``, optional, (default = None)
Optionally, you can provide a mapping between the names of the TokenEmbedders
that you are using to embed your TextField and an ordered list of indexer names
which are needed for running it. In most cases, your TokenEmbedder will only
require a single tensor, because it is designed to run on the output of a
single TokenIndexer. For example, the ELMo Token Embedder can be used in
two modes, one of which requires both character ids and word ids for the
same text. Note that the list of token indexer names is `ordered`, meaning
that the tensors produced by the indexers will be passed to the embedders
in the order you specify in this list.
allow_unmatched_keys : ``bool``, optional (default = False)
If True, then don't enforce the keys of the ``text_field_input`` to
match those in ``token_embedders`` (useful if the mapping is specified
via ``embedder_to_indexer_map``).
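    Example (an illustrative sketch, assuming an ``Embedding`` token embedder
    registered under the ``"tokens"`` key)::
        embedder = BasicTextFieldEmbedder({"tokens": Embedding(num_embeddings=100,
                                                               embedding_dim=10)})
        tensors = {"tokens": torch.LongTensor([[1, 2, 3]])}
        embedded = embedder(tensors)  # shape: (1, 3, 10)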
"""
def __init__(self,
token_embedders: Dict[str, TokenEmbedder],
embedder_to_indexer_map: Dict[str, List[str]] = None,
allow_unmatched_keys: bool = False) -> None:
super(BasicTextFieldEmbedder, self).__init__()
self._token_embedders = token_embedders
self._embedder_to_indexer_map = embedder_to_indexer_map
for key, embedder in token_embedders.items():
name = 'token_embedder_%s' % key
self.add_module(name, embedder)
self._allow_unmatched_keys = allow_unmatched_keys
@overrides
def get_output_dim(self) -> int:
output_dim = 0
for embedder in self._token_embedders.values():
output_dim += embedder.get_output_dim()
return output_dim
def forward(self, text_field_input: Dict[str, torch.Tensor], num_wrapping_dims: int = 0) -> torch.Tensor:
embedder_keys = self._token_embedders.keys()
input_keys = text_field_input.keys()
# Check for unmatched keys
if not self._allow_unmatched_keys:
if embedder_keys < input_keys:
# token embedder keys are a strict subset of text field input keys.
message = (f"Your text field is generating more keys ({list(input_keys)}) "
f"than you have token embedders ({list(embedder_keys)}. "
f"If you are using a token embedder that requires multiple keys "
f"(for example, the OpenAI Transformer embedder or the BERT embedder) "
f"you need to add allow_unmatched_keys = True "
f"(and likely an embedder_to_indexer_map) to your "
f"BasicTextFieldEmbedder configuration. "
f"Otherwise, you should check that there is a 1:1 embedding "
f"between your token indexers and token embedders.")
raise ConfigurationError(message)
elif self._token_embedders.keys() != text_field_input.keys():
# some other mismatch
message = "Mismatched token keys: %s and %s" % (str(self._token_embedders.keys()),
str(text_field_input.keys()))
raise ConfigurationError(message)
embedded_representations = []
keys = sorted(embedder_keys)
for key in keys:
            # If we pre-specified a mapping explicitly, use that.
if self._embedder_to_indexer_map is not None:
tensors = [text_field_input[indexer_key] for
indexer_key in self._embedder_to_indexer_map[key]]
else:
# otherwise, we assume the mapping between indexers and embedders
# is bijective and just use the key directly.
tensors = [text_field_input[key]]
# Note: need to use getattr here so that the pytorch voodoo
# with submodules works with multiple GPUs.
embedder = getattr(self, 'token_embedder_{}'.format(key))
for _ in range(num_wrapping_dims):
embedder = TimeDistributed(embedder)
token_vectors = embedder(*tensors)
embedded_representations.append(token_vectors)
return torch.cat(embedded_representations, dim=-1)
# This is some unusual logic, it needs a custom from_params.
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BasicTextFieldEmbedder': # type: ignore
# pylint: disable=arguments-differ,bad-super-call
# The original `from_params` for this class was designed in a way that didn't agree
# with the constructor. The constructor wants a 'token_embedders' parameter that is a
# `Dict[str, TokenEmbedder]`, but the original `from_params` implementation expected those
# key-value pairs to be top-level in the params object.
#
# This breaks our 'configuration wizard' and configuration checks. Hence, going forward,
# the params need a 'token_embedders' key so that they line up with what the constructor wants.
# For now, the old behavior is still supported, but produces a DeprecationWarning.
embedder_to_indexer_map = params.pop("embedder_to_indexer_map", None)
if embedder_to_indexer_map is not None:
embedder_to_indexer_map = embedder_to_indexer_map.as_dict(quiet=True)
allow_unmatched_keys = params.pop_bool("allow_unmatched_keys", False)
token_embedder_params = params.pop('token_embedders', None)
if token_embedder_params is not None:
# New way: explicitly specified, so use it.
token_embedders = {
name: TokenEmbedder.from_params(subparams, vocab=vocab)
for name, subparams in token_embedder_params.items()
}
else:
# Warn that the original behavior is deprecated
warnings.warn(DeprecationWarning("the token embedders for BasicTextFieldEmbedder should now "
"be specified as a dict under the 'token_embedders' key, "
"not as top-level key-value pairs"))
token_embedders = {}
keys = list(params.keys())
for key in keys:
embedder_params = params.pop(key)
token_embedders[key] = TokenEmbedder.from_params(vocab=vocab, params=embedder_params)
params.assert_empty(cls.__name__)
return cls(token_embedders, embedder_to_indexer_map, allow_unmatched_keys)
def extend_vocab(self, extended_vocab: Vocabulary) -> None:
"""
It assures that ``basic_text_field_embedder`` can embed with the extended vocab.
It iterates over each ``token_embedder`` and assures each of them can
embed with extended vocab.
"""
for key, _ in self._token_embedders.items():
token_embedder = getattr(self, 'token_embedder_{}'.format(key))
token_embedder.extend_vocab(extended_vocab)
|
UTF-8
|
Python
| false | false | 8,587 |
py
| 840 |
basic_text_field_embedder.py
| 586 | 0.635379 | 0.634797 | 0 | 162 | 52.006173 | 109 |
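A minimal usage sketch for the embedder above, assuming allennlp 0.x and its Embedding token embedder; the key name, vocabulary size, and dimensions are illustrative:
import torch
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
# one TokenEmbedder per indexer name; keys must match the TextField's indexers
embedder = BasicTextFieldEmbedder({"tokens": Embedding(num_embeddings=100, embedding_dim=8)})
batch = {"tokens": torch.randint(0, 100, (2, 5))}  # (batch, sequence) token ids
vectors = embedder(batch)                          # shape: (2, 5, 8)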
hyucel/ili9342c_mpy
| 15,822,659,539,740 |
96cbcca9b0c666b79f8464a53a442bacd5177524
|
652f531205a5701205cd6758ee159eb698f9e55a
|
/examples/M5STACK/rot.py
|
850144c5926f31075b5c5e03aed4a665158b1207
|
[
"MIT"
] |
permissive
|
https://github.com/hyucel/ili9342c_mpy
|
4c8863f2335ec058d1fb4a6bcf78a3993dfea169
|
4022a8372b586358a3e9a4f45fafe82c1d5469e7
|
refs/heads/main
| 2023-06-24T20:00:21.420299 | 2021-07-21T02:51:30 | 2021-07-21T02:51:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
fonts.py
Pages through all characters of four fonts on the M5Stack Display.
"""
import utime
import random
from machine import Pin, SPI
import ili9342c
import vga1_bold_16x32 as font
def main():
tft = ili9342c.ILI9342C(
SPI(
2,
baudrate=60000000,
polarity=1,
phase=1,
sck=Pin(18),
mosi=Pin(23)),
320,
240,
reset=Pin(33, Pin.OUT),
cs=Pin(14, Pin.OUT),
dc=Pin(27, Pin.OUT),
backlight=Pin(32, Pin.OUT),
rotation=0,
buffer_size=16*32*2)
tft.init()
tft.fill(ili9342c.BLACK)
utime.sleep(1)
while True:
for rot in range(8):
if rot in [0, 4]:
tft.fill(ili9342c.BLACK)
tft.rotation(rot)
s = "Rot {}".format(rot)
tft.text(
font,
s,
0,
0,
ili9342c.WHITE)
utime.sleep(3)
main()
|
UTF-8
|
Python
| false | false | 1,012 |
py
| 20 |
rot.py
| 14 | 0.470356 | 0.399209 | 0 | 53 | 18.09434 | 70 |
Sirius79/fuzzy-logic-controller
| 4,784,593,613,319 |
e20dc752ba30debd2c84d3e73ebf6dcacfce799f
|
e7626895a0832d345f65e408fb54e4d317e7eb35
|
/Fuzzy logic controller/defuzzify.py
|
98973a6f5076974a222a433ed062fe60f2ee7d4c
|
[
"MIT"
] |
permissive
|
https://github.com/Sirius79/fuzzy-logic-controller
|
761be9b773a8509c3ca472296427b36a8c514014
|
0b67015eedd25e560d77e1b548a684138e5a7337
|
refs/heads/master
| 2020-04-22T16:24:22.626389 | 2019-02-18T18:13:07 | 2019-02-18T18:13:07 | 170,506,554 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def maximum(**kwargs):
    """Return the (name, value) pair with the largest membership value."""
maxim = 0
max_key = None
for key, value in kwargs.items():
if value > maxim:
max_key = key
maxim = value
return max_key, maxim
def get_area(fuel):
    """Return (centre, area) of the output membership region for fuel level 0-4."""
centre = 0
area = 0
if fuel == 0:
area = 50
centre = 50
elif fuel == 1:
area = 100
centre = 100
elif fuel == 2:
area = 175
centre = 237.5
elif fuel == 3:
area = 150
centre = 350
elif fuel == 4:
area = 575
centre = 616.66
return centre, area
|
UTF-8
|
Python
| false | false | 577 |
py
| 6 |
defuzzify.py
| 5 | 0.467938 | 0.400347 | 0 | 29 | 18.862069 | 37 |
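A sketch of how these helpers typically combine into centroid defuzzification; the clipped membership values `mu` per fuel level are assumed inputs, not part of the file above:
def centroid_defuzzify(memberships):
    # memberships: dict mapping fuel level (0-4) -> clipped membership value mu
    num = den = 0.0
    for fuel, mu in memberships.items():
        centre, area = get_area(fuel)
        num += mu * area * centre
        den += mu * area
    return num / den if den else 0.0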
Hunter2809/revolt.py
| 11,562,052,000,549 |
ccdcdd017fa632b586cdfc1f11c5e6012f40a848
|
e97251549ad7f2a3d0ecb6d1233d693b20dc02e8
|
/revolt/server.py
|
7e0aea25cf918a8849fa85294f329d2be275eb03
|
[
"MIT"
] |
permissive
|
https://github.com/Hunter2809/revolt.py
|
541a27c371c1190367534a178595194b053137f9
|
31fc9b4de6e3007107a149eddae9680417c4c8d8
|
refs/heads/master
| 2023-09-01T18:42:42.908895 | 2021-10-24T17:34:05 | 2021-10-24T17:34:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, cast
from .channel import Channel
from .permissions import ServerPermissions
from .role import Role
if TYPE_CHECKING:
from .member import Member
from .state import State
from .types import Server as ServerPayload
__all__ = ("Server",)
class Server:
"""Represents a server
Attributes
-----------
id: :class:`str`
The id of the server
name: :class:`str`
The name of the server
owner: Optional[:class:`Member`]
The owner of the server
"""
__slots__ = ("state", "id", "name", "owner_id", "default_permissions", "_members", "_roles", "_channels")
def __init__(self, data: ServerPayload, state: State):
self.state = state
self.id = data["_id"]
self.name = data["name"]
self.owner_id = data["owner"]
self.default_permissions = ServerPermissions(*data["default_permissions"])
self._members: dict[str, Member] = {}
self._roles: dict[str, Role] = {role_id: Role(role, role_id, state, self) for role_id, role in data.get("roles", {}).items()}
channels = cast(list[Channel], list(filter(bool, [state.get_channel(channel_id) for channel_id in data["channels"]])))
self._channels: dict[str, Channel] = {channel.id: channel for channel in channels}
@property
def roles(self) -> list[Role]:
"""list[:class:`Role`] Gets all roles in the server in decending order"""
return list(self._roles.values())
@property
def members(self) -> list[Member]:
"""list[:class:`Member`] Gets all members in the server"""
return list(self._members.values())
@property
def channels(self) -> list[Channel]:
"""list[:class:`Member`] Gets all channels in the server"""
return list(self._channels.values())
def get_role(self, role_id: str) -> Role:
"""Gets a role from the cache
Parameters
-----------
id: :class:`str`
The id of the role
Returns
--------
:class:`Role`
The role
"""
return self._roles[role_id]
def get_member(self, member_id: str) -> Member:
"""Gets a member from the cache
Parameters
-----------
id: :class:`str`
The id of the member
Returns
--------
:class:`Member`
The member
"""
return self._members[member_id]
def get_channel(self, channel_id: str) -> Channel:
"""Gets a channel from the cache
Parameters
-----------
id: :class:`str`
The id of the channel
Returns
--------
:class:`Channel`
The channel
"""
return self._channels[channel_id]
@property
def owner(self) -> Optional[Member]:
owner_id = self.owner_id
if not owner_id:
return
return self.get_member(owner_id)
async def set_default_permissions(self, permissions: ServerPermissions) -> None:
"""Sets the default server permissions.
Parameters
-----------
permissions: :class:`ServerPermissions`
The new default server permissions
"""
await self.state.http.set_default_permissions(self.id, *permissions.value)
|
UTF-8
|
Python
| false | false | 3,444 |
py
| 13 |
server.py
| 12 | 0.55662 | 0.55662 | 0 | 119 | 27.941176 | 133 |
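A hedged usage sketch: the cache getters above index plain dicts, so a miss raises KeyError; guard lookups for ids that may not be cached (`server` and `channel_id` are assumed to come from an event handler):
try:
    channel = server.get_channel(channel_id)
except KeyError:
    channel = None  # not cached (or never existed)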
llanesm/life_generator_project
| 8,091,718,426,853 |
a3b9b0559ff0a88ed7e78af4e40e826e99c8735e
|
2de7a12a5d983b5e7ba52b2dbc24af641d45a752
|
/life-generator.py
|
a4729ad15614b5502f33cb1d230d336ee0e5810a
|
[] |
no_license
|
https://github.com/llanesm/life_generator_project
|
6a54e343d12bf17903b78c6a6badfa1b789ad04a
|
417fe26da9b8d0bc381db716496596be93cc3d1d
|
refs/heads/main
| 2023-03-09T11:23:14.904789 | 2021-03-01T02:41:24 | 2021-03-01T02:41:24 | 338,403,947 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Matthew Llanes
# Last Update: 2/28/2021
import csv
import sys
from tkinter import *
from tkinter import ttk
import os
def search_sort(search_results, top_amount):
"""
Performs a series of sorts and slices to provide the top toys in given category
:param search_results: list of products whose category matches user search
:param top_amount: number of products to return
:return: list of top_amount of top rated products from search results
"""
search_results.sort(key=lambda item: item.uniq_id) # sort by uniq_id
search_results.sort(key=lambda item: item.num_of_reviews, reverse=True) # then by number_of_reviews
search_results = search_results[0:top_amount * 10] # take top X*10
search_results.sort(key=lambda item: item.uniq_id) # re-sort by uniq_id
search_results.sort(key=lambda item: item.avg_review_rating, reverse=True) # then by average_review_rating
search_results = search_results[0:top_amount] # take top X
return search_results
class CSVData:
def __init__(self, infile):
"""Parse input file and create attributes from rows of dictionaries for easier access"""
self.infile = infile # csv file, in our case the kaggle amazon data
self.main_product_list = []
reader = csv.DictReader(open(infile, encoding='utf-8'))
for row in reader:
self.make_product(row)
def make_product(self, row):
"""
Creates Product object from DictReader dictionary and adds it to main product list
:param row: one line from the csv file, one product with all info
:return: None
"""
uniq_id, product_name = row['uniq_id'], row['product_name']
num_of_reviews, avg_review_rating = row['number_of_reviews'], row['average_review_rating']
cat_and_sub_cat = row['amazon_category_and_sub_category']
product = Product(uniq_id, product_name, num_of_reviews, avg_review_rating, cat_and_sub_cat)
self.main_product_list.append(product)
def user_search(self, category, top_amount):
"""
Search CSVData for all items by input category to create new list of products
:param category: user search category
:param top_amount: how many toys to output
:return: sorted list of top toys
"""
search_results = [] # new list contains products whose main category matches search category
for product in self.main_product_list:
if product.main_category == category:
search_results.append(product)
return search_sort(search_results, top_amount)
class Product:
"""Represents a product from the kaggle dataset"""
def __init__(self, uniq_id, product_name, num_of_reviews, avg_review_rating, cat_and_sub_cat):
"""
Most attributes are retrieved directly from the CSV file,
some are further extracted for pertinent meaning.
"""
self.uniq_id = uniq_id
self.product_name = product_name
self.num_of_reviews = num_of_reviews
self.avg_review_rating = avg_review_rating
self.category_and_sub_category = cat_and_sub_cat
self.main_category = self.get_main_category()
def get_main_category(self):
"""
Retrieves main category that the user will search for
:return: string indicating the main category of a product
"""
main_category = ''
for letter in self.category_and_sub_category:
if letter == ' ':
break
else:
main_category += letter
return main_category
def make_search_results_table(results):
"""
:param results: products from user search
:return: tuple containing lists of lists containing results data, and size of largest product name
"""
out_table = [('item_name', 'item_rating', 'item_num_reviews')]
size_of_longest_product_name = 0
for product in results:
out_table.append((product.product_name, product.avg_review_rating, product.num_of_reviews))
if len(product.product_name) > size_of_longest_product_name:
size_of_longest_product_name = len(product.product_name)
return out_table, size_of_longest_product_name
class Window:
def __init__(self, master, data):
"""Initializes GUI with its attributes"""
self.main_window = master
self.main_window.title('Life Generator')
self.product_data = data
prompt = 'Enter a category and number of top toys in that category to generate'
self.prompt = ttk.Label(master, text=prompt, font=('Arial', 12)).pack()
self.category_options = self.make_category_combobox()
self.selected_number_to_generate = self.make_number_to_generate_spinbox()
self.search_button = ttk.Button(master, text='See Results', command=self.process_search).pack()
self.search_results = ttk.Frame(self.main_window)
self.table_entry = ttk.Label(self.search_results)
def get_categories(self):
""":return: list of categories from kaggle dataset"""
categories = []
for product in self.product_data.main_product_list:
if product.main_category not in categories and product.main_category != '':
categories.append(product.main_category)
else:
continue
return categories
def make_category_combobox(self):
""":return: combobox full of categories from kaggle data for user to choose from"""
combobox = ttk.Combobox(self.main_window)
categories = self.get_categories()
combobox['values'] = categories
combobox.set(categories[0])
combobox.pack()
return combobox
def make_number_to_generate_spinbox(self):
""":return: spinbox for reference by window object"""
number_to_generate_spinbox = ttk.Spinbox(self.main_window, from_=1, to=10000, increment=10)
number_to_generate_spinbox.set('1')
number_to_generate_spinbox.pack()
return number_to_generate_spinbox
def perform_search(self):
""":return: results from user user search"""
category = self.category_options.get()
top_amount = self.selected_number_to_generate.get()
results = self.product_data.user_search(category, int(top_amount))
input_data = ['toys', category, top_amount]
return input_data, results
def display_results_in_gui(self, out_table):
"""
:param out_table: tuple containing lists of lists containing results data, and size of largest product name
:return: None
"""
for i in range(len(out_table[0])):
for j in range(3):
if j == 0:
width = out_table[1]
else:
width = 20
self.table_entry = ttk.Label(self.search_results, width=width)
self.table_entry.grid(row=i, column=j)
self.table_entry.config(text=out_table[0][i][j])
def process_search(self):
"""
Function to process search and output data to outfile and/or GUI table
:return: None
"""
self.search_results.destroy()
self.search_results = ttk.Frame(self.main_window)
results = self.perform_search()
make_outfile(results[0], results[1])
out_table = make_search_results_table(results[1])
self.display_results_in_gui(out_table)
self.search_results.pack()
class ContentGenerator:
"""Microservice provided by other student, will use output file to display information to the user"""
def __init__(self):
self.path = "C:\\Users\\Idabeard\\SoftwareEngineering1\\Daniel D's Content Generator\\output.csv"
self.wiki_paragraph = None
self.primary_keyword = None
self.secondary_keyword = None
self.process_content_generated()
def does_path_exist(self):
"""
:return:
"""
return os.path.exists(self.path)
def process_content_generated(self):
"""
:return:
"""
if os.path.exists(self.path):
reader = csv.reader(open(self.path))
next(reader)
next(reader)
content = next(reader)
self.make_keywords(content[0])
self.wiki_paragraph = content[1]
def make_keywords(self, keywords):
"""
:param keywords: string with a primary and secondary keyword, separated by a ;
:return: None
"""
primary = ''
i = 0
while keywords[i] != ';':
primary += keywords[i]
i += 1
self.primary_keyword, self.secondary_keyword = primary, keywords[i + 1:]
def process_infile(filename):
"""
:param filename: input file from command line
:return: search criteria from input file
"""
reader = csv.reader(open(filename))
line_data = next(reader)
return line_data
def make_outfile(search_criteria, output):
"""
Formats and outputs csv file from results obtained from user search
:param search_criteria: search criteria
:param output: list of product objects(toys)
:return: None, writes outfile
"""
with open('output.csv', 'w') as outfile:
first_head_half = 'input_item_type,input_item_category,input_number_to_generate,output_item_name,'
second_head_half = 'output_item_rating,output_item_num_reviews'
outfile.write(first_head_half + second_head_half + '\n')
for product in output:
criteria_str = search_criteria[0] + ',' + search_criteria[1] + ',' + search_criteria[2] + ','
product_info = product.product_name + ',' + product.avg_review_rating + ',' + product.num_of_reviews
outfile.write(criteria_str + product_info + '\n')
def main():
"""
Driver code to start program. Decision based on whether program
is started with input file on command line or used with GUI
:return: None
"""
content = ContentGenerator()
kaggle_data = CSVData('amazon_co-ecommerce_sample.csv')
if len(sys.argv) == 2: # if there's an input file, make an output file w/o GUI
infile_data = process_infile(sys.argv[1])
results = kaggle_data.user_search(infile_data[1], int(infile_data[2]))
make_outfile(infile_data, results)
else: # otherwise, start GUI
root = Tk()
app = Window(root, kaggle_data)
root.mainloop()
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 10,543 |
py
| 2 |
life-generator.py
| 1 | 0.628758 | 0.624016 | 0 | 273 | 37.619048 | 115 |
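The two-pass sort in `search_sort` relies on Python's stable sort: ordering by the tie-breaker first, then by the primary key, leaves ties resolved by the earlier pass. A small standalone illustration (the names and scores are made up):
items = [("b", 2), ("a", 1), ("c", 2)]
items.sort(key=lambda t: t[0])                 # tie-breaker: name
items.sort(key=lambda t: t[1], reverse=True)   # primary key: score
print(items)  # [('b', 2), ('c', 2), ('a', 1)] -- ties broken alphabetically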
AchimGoral/DI_Bootcamp
| 5,274,219,864,861 |
48558ca97bbaf6d263c09edd405475d08c415fd8
|
b37fdefb01d7b93a4f56a7c7cc60f9f78549de4c
|
/DI_Bootcamp/Week_8/Day_3/Exercise_XP/gifapp_root/gif/admin.py
|
4629c5c5f6b11a9d241a082b372f10ef64e99394
|
[] |
no_license
|
https://github.com/AchimGoral/DI_Bootcamp
|
e7b13d7397ab5c9e5ad8041430c8bfbafec13c88
|
9345731503e2bb298bd3a579ffad590350f13df5
|
refs/heads/main
| 2023-04-18T20:06:45.631067 | 2021-05-01T08:08:45 | 2021-05-01T08:08:45 | 328,769,128 | 0 | 1 | null | false | 2021-01-27T14:30:09 | 2021-01-11T19:24:48 | 2021-01-27T14:26:29 | 2021-01-27T14:30:09 | 20,055 | 0 | 0 | 0 |
HTML
| false | false |
from django.contrib import admin
from .models import *  # Gif (many-to-many with Category), Category
# Register your models here.
admin.site.register(Gif)
admin.site.register(Category)
|
UTF-8
|
Python
| false | false | 159 |
py
| 281 |
admin.py
| 154 | 0.786164 | 0.786164 | 0 | 6 | 25.666667 | 41 |
TerminusEst/E_Field_Calc
| 300,647,731,619 |
377f5a530ee2ba08b8682e17c0796bee92d6a74d
|
24300761c3c8846196600b7111370e5c3fbc6305
|
/CCMC_analysis_scripts/1_READ_B.py
|
92a57d33c0e624c5a58798b44cb6c428ce4fb8d5
|
[] |
no_license
|
https://github.com/TerminusEst/E_Field_Calc
|
18a9150e4d45c051501ec17fa12ffbb0785f8641
|
83d533bd8853b3a17e2c38360e6eddd7ad9da534
|
refs/heads/master
| 2021-01-23T07:51:16.356803 | 2019-04-10T15:28:03 | 2019-04-10T15:28:03 | 80,513,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import datetime
import numpy
import numpy as np
from numpy import arange, nan
from draco import EField as ef
import aacgmv2
import matplotlib.dates as mdates
#/usr/bin/ffmpeg -r 30 -f image2 -i %03d.png -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -vcodec libx264 -crf 10 -pix_fmt yuv420p 2003_11_20_Scan.mp4
##########################################################################################
def time2float(x):
"""converts datetime to float, so that interpolation/smoothing can be performed"""
if (type(x) == numpy.ndarray) or (type(x) == list):
emptyarray = []
for i in x:
z = (i - datetime.datetime(1970, 1, 1, 0)).total_seconds()
emptyarray.append(z)
emptyarray = numpy.array([emptyarray])
return emptyarray[0]
else:
return (x - datetime.datetime(1970, 1, 1, 0)).total_seconds()
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
def float2time(x):
"""converts array back to datetime so that it can be plotted with time on the axis"""
if (type(x) == numpy.ndarray) or (type(x) == list):
emptyarray = []
for i in x:
z = datetime.datetime.utcfromtimestamp(i)
emptyarray.append(z)
emptyarray = numpy.array([emptyarray])
return emptyarray[0]
else:
return datetime.datetime.utcfromtimestamp(x)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
def get_date(aaa, filetype):
"""get datetime from filename"""
if filetype == "iono":
year = int(aaa[13:17])
month = int(aaa[17:19])
day = int(aaa[19:21])
hour = int(aaa[22:24])
minute = int(aaa[24:26])
elif filetype == "mag":
year = int(aaa[10:14])
month = int(aaa[14:16])
day = int(aaa[16:18])
hour = int(aaa[19:21])
minute = int(aaa[21:23])
timedate = datetime.datetime(year, month, day, hour, minute)
dayfrac = ((minute/60.) + hour)/24.
loncorrect = ((1 - dayfrac)*360) - 180.
#print(timedate, dayfrac, loncorrect)
return timedate, dayfrac, loncorrect
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
def calculate_magnetic_latitude(glon, glat, dtime):
    for height in arange(0, 100, 0.5):
        # assumes the lookup epoch is the function's `dtime` argument
        calclat, calclon, z = aacgmv2.get_aacgm_coord(glat, glon, height, dtime)
        if not np.isnan(calclat):
            return calclat, calclon, height
    return nan, nan, nan
def get_td_1(a):
b = a.split("_")
year = int(b[2][:4])
month = int(b[2][4:6])
day = int(b[2][6:])
hour = int(b[-1][:2])
minute = int(b[-1][2:4])
second = int(b[-1][4:6])
td = datetime.datetime(year, month, day, hour, minute, second)
return td
def get_td_2(a):
b = a[10:]
year = int(b[:4])
month = int(b[4:6])
day = int(b[6:8])
hour = int(b[9:11])
minute = int(b[11:13])
second = int(b[13:15])
td = datetime.datetime(year, month, day, hour, minute, second)
return td
##########################################################################################
if 1:
input_filename = "/home/blake/Drive/NASA_Data/CCMC_Runs_With_Changes/outputs/Sean_Blake_040519_1/DST_CCMC_output.txt"
output_filename = "/home/blake/Drive/NASA_Data/CCMC_Runs_With_Changes/outputs/Sean_Blake_040519_1/Numpy_Data/DST"
    ef.read_write_dst(input_filename, output_filename)  # the EField module is imported above as ef
##########################################################################################
folder_mag = "/home/blake/Drive/NASA_Data/CCMC_Runs_With_Changes/outputs/Sean_Blake_040519_1/Mag_Data/"
files_mag = sorted(os.listdir(folder_mag))
lenfiles = len(files_mag) * 1.0
geolon, geolat = np.loadtxt(folder_mag + files_mag[0], usecols = (0, 1), unpack = True, skiprows = 10)
#-------------------------------------------------------------------------------
# set up the master objects to be populated
# read in the files average the files for each minute
print("Reading in files")
timedate1 = []
masterBx = np.ones((int(len(files_mag)), len(geolon)))
masterBy, masterBz = np.copy(masterBx), np.copy(masterBx)
masterMx, masterMy, masterMz = np.copy(masterBx), np.copy(masterBx), np.copy(masterBx)
masterFx, masterFy, masterFz = np.copy(masterBx), np.copy(masterBx), np.copy(masterBx)
masterHx, masterHy, masterHz = np.copy(masterBx), np.copy(masterBx), np.copy(masterBx)
masterPx, masterPy, masterPz = np.copy(masterBx), np.copy(masterBx), np.copy(masterBx)
for i, v in enumerate(files_mag):
dBn, dBe, dBd = np.loadtxt(folder_mag + v, skiprows = 4, usecols = (5, 6, 7), unpack = True)
masterBx[i], masterBy[i], masterBz[i] = dBn, dBe, dBd
timedate1.append(get_td_1(v))
if i%10 == 0:
print(i, v)
numpysavefolder = "/home/blake/Drive/NASA_Data/CCMC_Runs_With_Changes/outputs/Sean_Blake_040519_1/Numpy_Data/"
np.save(numpysavefolder + "BX", masterBx)
np.save(numpysavefolder + "BY", masterBy)
lonlat = np.concatenate((np.array([geolon]).T, np.array([geolat]).T), axis = 1)
np.save(numpysavefolder + "LONLAT", lonlat)
np.save(numpysavefolder + "TD", timedate1)
##########################################################################################
|
UTF-8
|
Python
| false | false | 5,149 |
py
| 26 |
1_READ_B.py
| 22 | 0.550204 | 0.51408 | 0 | 139 | 35.647482 | 145 |
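A quick round-trip check for the time helpers above (assumes this file's `time2float`/`float2time` are in scope; the timestamp is arbitrary):
t = datetime.datetime(2003, 11, 20, 12, 30)
assert float2time(time2float(t)) == t
assert time2float(datetime.datetime(1970, 1, 1)) == 0.0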
nohtanoj/autorubric
| 12,206,297,104,092 |
f99459d6795b6758ba8500cc0192e17aed1d1533
|
5053aca9fc86fcc372446f5339df7137f598d64e
|
/autorubric/rubric/proc_data.py
|
91627bed185554107a3787b92521746d7301a058
|
[] |
no_license
|
https://github.com/nohtanoj/autorubric
|
d795dac4369a79838052d72d4e8e5f203098a9a6
|
54b7b65c50bd28474fb08be8548ca0c8b65b239a
|
refs/heads/master
| 2020-04-10T14:53:29.711742 | 2019-05-16T15:07:34 | 2019-05-16T15:07:34 | 161,090,576 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Process response and reference data files
"""
from rubric.utils import normalize
import collections
import csv
EMAIL_COL_RESPONSES = 'Email Address'
def proc_responses(fname, problem, students=None):
"""
Generate valid and invalid submissions
(in terms of compilability) for a particular problem.
Arguments:
fname: filename of responses to read
problem: name of problem to process
students: specific set of students to consider (iterable containing email addresses)
Returns:
valid_submissions, invalid_submissions, and nones
valid_submissions: dict with key as email and value as the code submission
invalid_submissions: dicts with key as email and value as the code submission
nones: set of students who submitted None
"""
valid_submissions = {}
invalid_submissions = {}
with open(fname, encoding='windows-1252') as csvfile:
response_reader = csv.DictReader(csvfile)
        for row in response_reader:
if students is not None and row[EMAIL_COL_RESPONSES] not in students:
continue
else:
code = row[problem]
split_lines = code.splitlines()
res = []
for ll in split_lines:
if ll.strip():
                        rstrip_str = ll.rstrip()
                        # slice off the stripped text to keep only the leading whitespace
                        lspace = rstrip_str[:-len(ll.rstrip().lstrip())]
                        res.append(lspace + normalize(ll.strip()))
code = "\n".join(res)
email = row[EMAIL_COL_RESPONSES]
try:
compile(code, '<string>', 'exec')
if email in invalid_submissions:
del invalid_submissions[email]
valid_submissions[email] = code
except SyntaxError:
try:
code = code.replace('_', '')
split_lines = code.splitlines()
res = []
for ll in split_lines:
if ll.strip():
rstrip_str = ll.rstrip()
lspace = rstrip_str[:-len(ll.rstrip().lstrip())]
res.append(lspace + normalize(ll.strip()))
code = "\n".join(res)
compile(code, '<string>', 'exec')
if email in invalid_submissions:
del invalid_submissions[email]
valid_submissions[email] = code
except SyntaxError:
invalid_submissions[email] = code
# Get rid of Nones
nones = set()
for key, value in valid_submissions.items():
if value.strip() == 'None' or value.strip() == '"None"':
nones.add(key)
for k in nones:
del valid_submissions[k]
return valid_submissions, invalid_submissions, nones
COLNAME = 'c'
EMAIL_COL_REF = 'Email'
def gen_ref_scores(emails, fname, num_features):
"""
Generates reference truth values for submissions.
Args:
emails: set of emails
fname: Gradescope-generated csv file for scores
num_features: number of features, an integer
Returns:
dictionary containing mapping from emails to truth values
"""
with open(fname) as csvfile:
grader_scores = csv.reader(csvfile)
column_names = next(grader_scores)
submission_time_index = column_names.index('Submission Time')
adjustment_index = column_names.index('Adjustment')
rubric_feature_names = column_names[submission_time_index+1:adjustment_index]
newcols = []
for j in range(len(rubric_feature_names)):
newcols.append(COLNAME + str(j))
newcolumns = tuple(column_names[: submission_time_index + 1] + newcols + column_names[adjustment_index:])
ref_rubric_scores = collections.defaultdict(list)
with open(fname) as csvfile:
        f = csv.DictReader(csvfile, fieldnames=newcolumns)
for row in f:
if row[EMAIL_COL_REF] in emails:
for i in range(num_features):
                    actual_item = row[COLNAME + str(i)] == 'true'
ref_rubric_scores[row[EMAIL_COL_REF]].append(actual_item)
return ref_rubric_scores
|
UTF-8
|
Python
| false | false | 4,411 |
py
| 27 |
proc_data.py
| 26 | 0.560871 | 0.55951 | 0 | 116 | 37.025862 | 109 |
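The indentation-preserving normalization loop in `proc_responses` can be seen in isolation below; `normalize` is stubbed as identity here, standing in for `rubric.utils.normalize`:
def normalize(s):
    return s  # stand-in for rubric.utils.normalize
code = "def f( x ) :\n    return  x"
res = []
for ll in code.splitlines():
    if ll.strip():
        indent = ll[:len(ll) - len(ll.lstrip())]   # leading whitespace only
        res.append(indent + normalize(ll.strip()))
print("\n".join(res))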
dambergn-codefellows/py401_data-structures-and-algorithms
| 2,516,850,874,253 |
df2369ff2d0330ef45c1ddd3e076d51303e386b5
|
c328b3fdcffbe7dc879e5304358aef2cd8061cfb
|
/challenges/array_shift/test_array_shift.py
|
2816917f172bc05eeccf79b3fdbe5bcf65c26ef3
|
[
"MIT"
] |
permissive
|
https://github.com/dambergn-codefellows/py401_data-structures-and-algorithms
|
35652237173192a0b1779310a351df0ec77ae930
|
64ca78a70c9de8bee37459481eb8ce0d359b1bb8
|
refs/heads/master
| 2020-03-26T06:43:56.841490 | 2018-09-28T20:50:01 | 2018-09-28T20:50:01 | 144,619,223 | 0 | 0 |
MIT
| false | 2018-09-28T20:50:02 | 2018-08-13T18:36:39 | 2018-09-06T06:36:55 | 2018-09-28T20:50:02 | 22,833 | 0 | 0 | 0 |
Python
| false | null |
from .array_shift import insertShiftArray
import pytest
def test_array_array_module_exists():
assert insertShiftArray
def test_array_add_middle_even():
expected = [1,2,3,4,5]
actual = insertShiftArray([1,2,4,5], 3)
assert expected == actual
def test_array_add_middle_odd():
expected = [1,2,3,4,5,6]
actual = insertShiftArray([1,2,3,5,6], 4)
assert expected == actual
def test_array_add_middle_example1():
expected = [2,4,5,6,8]
actual = insertShiftArray([2,4,6,8], 5)
assert expected == actual
def test_array_add_middle_example2():
expected = [4,8,15,16,23,42]
actual = insertShiftArray([4,8,15,23,42], 16)
assert expected == actual
|
UTF-8
|
Python
| false | false | 668 |
py
| 31 |
test_array_shift.py
| 18 | 0.685629 | 0.60479 | 0 | 30 | 21.3 | 47 |
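The module under test is not shown; a sketch that satisfies all four cases above inserts the value at the midpoint, rounding the split upward for odd lengths:
def insertShiftArray(arr, value):
    mid = (len(arr) + 1) // 2   # e.g. len 4 -> 2, len 5 -> 3
    return arr[:mid] + [value] + arr[mid:]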
VorTECHsa/python-sdk
| 13,305,808,689,364 |
b84c740e33f367097dcf1a6eec930e682fa41de0
|
f853366f0f7574610034bf83585269775d3dd03a
|
/docs/examples/2_crude_from_saudi_arabia_to_india.py
|
a0708ece0781fa58ee6f2ef42bdbff4a4f450d11
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/VorTECHsa/python-sdk
|
0355094bc245bf1b1c9c6f396fadf30196bf6729
|
f994d6a0c5a87e3e08a0bdec4b5b2cdb9e125c10
|
refs/heads/master
| 2023-09-01T21:42:15.633706 | 2023-08-31T09:14:00 | 2023-08-31T09:14:00 | 218,062,510 | 10 | 8 |
Apache-2.0
| false | 2023-09-14T08:16:26 | 2019-10-28T14:13:16 | 2022-12-14T23:07:23 | 2023-09-14T08:16:25 | 37,598 | 19 | 7 | 39 |
Python
| false | false |
"""
Let's find all crude cargo movements from Saudi Arabia to India that loaded in the last month.
The below script returns a `pd.DataFrame`, similar to the table given in the movements tab of `https://analytics.vortexa.com`,
filtering on `Products: Crude` with `Origin: Saudi Arabia`, `Destination: India` and `Date Range: Departures in the last Month`.
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from vortexasdk import CargoMovements, Geographies, Products
if __name__ == "__main__":
now = datetime.utcnow()
one_month_ago = now - relativedelta(months=1)
# For this analysis we need the geography ID for India, and the geography ID for Saudi Arabia. We're going to
# show 2 ways to retrieve geography IDs. You'll want to chose method 1 or 2 depending on your use case.
# Option 1. We look up a geography with an exact matching name
saudi_arabia = (
Geographies()
.search("Saudi Arabia", exact_term_match=True)
.to_list()[0]
.id
)
# Option 2. We search for geographies with similar names, then pick the one we're looking for
# First we find the ID for the country India. Note that when searching geographies with the term 'india', we'll
# retrieve all geographies with india in the name, ie Indiana, British Indian Ocean Territory...
all_geogs_with_india_in_the_name = Geographies().search("india").to_list()
# If running interactively, you may want to print all the names here to inspect them for yourself
for g in all_geogs_with_india_in_the_name:
print(g.name)
# We're only interested in the country India here
india = [
g.id for g in all_geogs_with_india_in_the_name if g.name == "India"
]
# Check we've only got one ID for India
assert len(india) == 1
# Let's find the Crude ID,
# here we know the exact name of the product we're looking for so we set exact_term_match=True
crude = Products().search("Crude", exact_term_match=True).to_list()[0].id
# Query the API.
search_result = CargoMovements().search(
filter_activity="loading_end",
filter_origins=saudi_arabia,
filter_destinations=india,
filter_products=crude,
filter_time_min=one_month_ago,
filter_time_max=now,
)
# A complete list of available columns can be found at https://vortechsa.github.io/python-sdk/endpoints/cargo_movements/#notes
# We only require a subset of available columns here
required_columns = [
# A cargo movement can be carried by multiple vessels across various STS transfers. You can find all the vessels that
# the cargo was onboard by inspecting the 'vessels.0', 'vessels.1' columns etc.
# The 'vessels.0' columns shows the primary vessel associated with the cargo movement
"vessels.0.name",
"vessels.0.vessel_class",
# Here we show any corporate information associated with the primary vessel
"vessels.0.corporate_entities.charterer.label",
"vessels.0.corporate_entities.time_charterer.label",
"vessels.0.corporate_entities.effective_controller.label",
# Show the product information and quantity
"product.group.label",
"product.grade.label",
"quantity",
# Is the vessel in transit, has it already discharged, or is it in floating storage?
"status",
# Show the loading Port name, and the loading timestamp
"events.cargo_port_load_event.0.location.port.label",
"events.cargo_port_load_event.0.end_timestamp",
# Show the discharge Port name, and the discharge timestamp
"events.cargo_port_unload_event.0.location.port.label",
"events.cargo_port_unload_event.0.end_timestamp",
]
# Convert the search result to a dataframe
df = search_result.to_df(columns=required_columns)
# Sort the dataframe by loading timestamp
df = df.sort_values(by=["events.cargo_port_load_event.0.end_timestamp"])
|
UTF-8
|
Python
| false | false | 4,021 |
py
| 231 |
2_crude_from_saudi_arabia_to_india.py
| 146 | 0.687143 | 0.681671 | 0 | 90 | 43.677778 | 130 |
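A possible follow-up on the resulting DataFrame, assuming the script above has run and `df` is in scope -- total quantity per vessel class:
summary = df.groupby("vessels.0.vessel_class")["quantity"].sum()
print(summary.sort_values(ascending=False))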
jrying/algorithm_practice
| 3,315,714,796,205 |
e053ce850e4e89da6e223e9822a84dcdd2063c24
|
3c15c260736cafd9e691585fb50512e844345fd5
|
/codelets/connected_component.py
|
937fc356964d1735ae387c76bbbe20c040b464d9
|
[] |
no_license
|
https://github.com/jrying/algorithm_practice
|
10a94033723144a45cd656aff5562957c8667438
|
da1ba84e4a552dfefc6ee0079858aa508e8ab2da
|
refs/heads/master
| 2020-12-22T07:59:20.211871 | 2016-08-10T10:53:46 | 2016-08-10T10:53:46 | 16,938,384 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import itertools
# With library
# Without library
|
UTF-8
|
Python
| false | false | 53 |
py
| 62 |
connected_component.py
| 50 | 0.754717 | 0.754717 | 0 | 6 | 7.833333 | 17 |