| code | apis | extract_api |
| --- | --- | --- |
| stringlengths 22 to 1.05M | listlengths 1 to 3.31k | stringlengths 75 to 3.25M |
import os
import wget
import time
import glob
import getpass
import tarfile
import subprocess
import email.mime.multipart
import email.mime.text
import email.mime.image
import email.mime.audio
from datetime import datetime
from pprint import pprint
from colorama import Style, Fore
from smtplib import SMTP, SMTP_SSL
from imaplib import IMAP4_SSL, IMAP4
def smtp_connect(smtp_server, verbose=True):
""" Conection to smtp server.
smtp_server_ip (str): This value is the smtp server's ip.
verbose (boolean): Print information about function progress.
Returns:
None
"""
try:
smtp = SMTP_SSL(host=smtp_server)
smtp.ehlo()
if verbose:
print(Fore.GREEN+ " ==> [smtp_connect] with SSL" +Style.RESET_ALL)
return smtp
except:
try:
smtp = SMTP(host=smtp_server)
smtp.ehlo()
if verbose:
print(Fore.GREEN+ " ==> [smtp_connect] without SSL" +Style.RESET_ALL)
return smtp
except:
print(Fore.RED+ " ==> [smtp_connect] failed!" +Style.RESET_ALL)
return 1
def imap_connect(imap_server, username, password, verbose=True):
""" Connection to imp server.
imap_server_ip (str): This value is the imap server's ip.
verbose (boolean): Print information about function progress.
Returns:
None
"""
try:
imap = IMAP4_SSL(imap_server)
imap.login(username, password)
if verbose:
print(Fore.GREEN+ " ==> [imap_connect] with SSL" +Style.RESET_ALL)
return imap
except:
try:
imap = IMAP4(imap_server)
imap.login(username, password)
if verbose:
print(Fore.GREEN+ " ==> [imap_connect] without SSL" +Style.RESET_ALL)
return imap
except:
print(Fore.RED+ " ==> [imap_connect] failed!" +Style.RESET_ALL)
def send_mail(smtp_server, FROM="", TO="", subject="", msg="", attachements=[], verbose=True):
""" Send mail.
smtp_server_ip (str): This value is the smtp server's ip.
FROM (str): This value is the sender email address.
TO (list): This value is a list of multiple recipient
SUBJECT (str, Optional): This value is the email's subject content.
msg (str, Optional): This value is the email's message content.
attachements (list Optional):
verbose (boolean): Print information about function progress.
Returns:
None
"""
smtp = smtp_connect(smtp_server, verbose=False)
mail = email.mime.multipart.MIMEMultipart()
mail["Subject"] = "[ "+subject+" ]"
mail["From"] = FROM
mail["To"] = TO
msg = email.mime.text.MIMEText(msg, _subtype="plain")
msg.add_header("Content-Disposition", "email message")
mail.attach(msg)
for attachement in attachements:
if attachement[0] == "image":
img = email.mime.image.MIMEImage(open(attachement[1], "rb").read())
img.add_header("Content-Disposition", "attachement")
img.add_header("Attachement-type", "image")
img.add_header("Attachement-filename", attachement[1])
mail.attach(img)
if attachement[0] == "file":
text = email.mime.text.MIMEText(open(attachement[1], "r").read())
text.add_header("Content-Disposition", "attachement")
text.add_header("Attachement-type", "filetext")
text.add_header("Attachement-filename", attachement[1])
mail.attach(text)
try:
smtp.sendmail(mail["From"], mail["To"], mail.as_string())
if verbose:
print(Fore.GREEN+ " ==> [send_mail] "+mail["From"]+" --> "+mail["To"]+" {"+subject+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
smtp_logout(smtp, verbose=False)
except Exception as e:
print(Fore.RED+ " ==> [send_mail] failed! "+mail["From"]+" --> "+mail["To"]+" -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
print(Fore.RED+str(e)+Style.RESET_ALL)
smtp_logout(smtp, verbose=False)
def read_mailbox(imap_server, username, password, verbose=True): # attribut [ _payload ]
""" Read email inbox
imap_server_ip (str): This value is the imap server's ip.
login (str): This value is the username login.
password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
all_mails = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
all_mails.append(mail_content)
for part in mail_content.walk():
if not part.is_multipart():
pass
if verbose:
print(Fore.GREEN+ " ==> [read_mailbox] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return all_mails
def read_mailbox_download_execute(imap_server, imap_login, imap_password):
""" Read email inbox and download link inside.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
try:
path = None
mails = read_mailbox(imap_server, imap_login, imap_password, verbose=False)
if len(mails) <= 0:
print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return 0
for mail in mails:
for element in str(mail).replace("\n", " ").split(" "):
if "http" in element:
path = wget.download(element)
if path == None:
print(Fore.YELLOW+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return 0
tarf_file = tarfile.open(path)
tarf_file.extractall(".")
tarf_file.close()
python_files = glob.glob("*/*maj*.py")
for python_script in python_files:
subprocess.getoutput("python3 "+python_script)
print(Fore.GREEN+ " ==> [read_mailbox_download_execute] {"+str(len(mails)-1)+"} -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
return True
except Exception as e:
print(Fore.RED+ " ==> [read_mailbox_download_execute] failed during execution! -- "+ time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
print(e)
return False
def download_attachements(imap_server, username, password, verbose=True):
""" Read email inbox and download attachements.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
#INIT
if not os.path.isdir("/home/"+getpass.getuser()+"/Downloads"):
os.makedirs("/home/"+getpass.getuser()+"/Downloads")
mails = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
for part in mail_content.walk():
if not part.is_multipart():
if part["Content-Disposition"] == "attachement" and part["Attachement-type"] == "filetext":
username = getpass.getuser()
file = open(part["Attachement-filename"],"w")
file.write(part._payload)
file.close()
imap_logout(imap, verbose=False)
print(Fore.GREEN+ " ==> [download_attachements] --- " + time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
# In progress
def delete_old_emails(imap, time_laps=60):
delete_messages = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
status, data = imap.fetch(mail, "(RFC822)")
mail_content = email.message_from_string(data[0][1].decode("utf-8"))
if (time.time() - time.mktime(time.strptime(mail_content["Date"], "%a, %d %b %Y %H:%M:%S %z")) >= time_laps ):
delete_messages.append(mail)
delete_emails(imap, delete_messages)
def delete_emails(imap, mails):
""" Delete mails specified in attributs
imap (imap_object): This value is the imap server's object.
mails (list): This value is an email list to delete.
Returns:
list of str: all emails content
"""
for mail in mails:
imap.store(mail,"+FLAGS","\\Deleted")
imap.expunge()
def delete_all_emails(imap_server, username, password, verbose=True):
""" Delete all emails in INBOX.
imap_server_ip (str): This value is the imap server's ip.
imap_login (str): This value is the username login.
imap_password (str): This value is the password login.
verbose (boolean): Print information about function progress.
Returns:
list of str: all emails content
"""
imap = imap_connect(imap_server, username, password, verbose=False)
delete_messages = []
imap.select("INBOX")
status, mails = imap.search(None, "ALL")
for mail in mails[0].split():
delete_messages.append(mail)
delete_emails(imap, delete_messages)
status, mails = imap.search(None, "ALL")
if len(mails) == 1:
print(Fore.GREEN+ " ==> [delete_all_emails] was successfull --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return 0
print(Fore.RED+ " ==> [delete_all_emails] failed! --- " + time.strftime("%H:%M:%S", time.localtime()) +Style.RESET_ALL)
imap_logout(imap, verbose=False)
return 1
def imap_logout(imap, verbose=True):
""" Logout out to the imap service
imap (imap_object): This value is the imap server's object.
Returns:
None
"""
try:
imap.close()
imap.logout()
if verbose:
print(Fore.GREEN+ " ==> [imap_logout] was successfull" +Style.RESET_ALL)
except:
print(Fore.RED+ " ==> [imap_logout] failed" +Style.RESET_ALL)
def smtp_logout(smtp, verbose=True):
""" Logout out to the smtp service
smtp (smtp_object): This value is the smtp server's object.
Returns:
None
"""
try:
smtp.quit()
if verbose:
print(Fore.GREEN+ " ==> [smtp_logout] was successfull" +Style.RESET_ALL)
except:
print(Fore.RED+ " ==> [smtp_logout] failed" +Style.RESET_ALL)
|
[
"subprocess.getoutput",
"wget.download",
"tarfile.open",
"smtplib.SMTP",
"time.strptime",
"imaplib.IMAP4_SSL",
"smtplib.SMTP_SSL",
"imaplib.IMAP4",
"getpass.getuser",
"time.localtime",
"time.time",
"glob.glob"
] |
[((636, 662), 'smtplib.SMTP_SSL', 'SMTP_SSL', ([], {'host': 'smtp_server'}), '(host=smtp_server)\n', (644, 662), False, 'from smtplib import SMTP, SMTP_SSL\n'), ((1448, 1470), 'imaplib.IMAP4_SSL', 'IMAP4_SSL', (['imap_server'], {}), '(imap_server)\n', (1457, 1470), False, 'from imaplib import IMAP4_SSL, IMAP4\n'), ((6523, 6541), 'tarfile.open', 'tarfile.open', (['path'], {}), '(path)\n', (6535, 6541), False, 'import tarfile\n'), ((6625, 6648), 'glob.glob', 'glob.glob', (['"""*/*maj*.py"""'], {}), "('*/*maj*.py')\n", (6634, 6648), False, 'import glob\n'), ((6704, 6752), 'subprocess.getoutput', 'subprocess.getoutput', (["('python3 ' + python_script)"], {}), "('python3 ' + python_script)\n", (6724, 6752), False, 'import subprocess\n'), ((848, 870), 'smtplib.SMTP', 'SMTP', ([], {'host': 'smtp_server'}), '(host=smtp_server)\n', (852, 870), False, 'from smtplib import SMTP, SMTP_SSL\n'), ((1675, 1693), 'imaplib.IMAP4', 'IMAP4', (['imap_server'], {}), '(imap_server)\n', (1680, 1693), False, 'from imaplib import IMAP4_SSL, IMAP4\n'), ((8945, 8956), 'time.time', 'time.time', ([], {}), '()\n', (8954, 8956), False, 'import time\n'), ((6271, 6293), 'wget.download', 'wget.download', (['element'], {}), '(element)\n', (6284, 6293), False, 'import wget\n'), ((7732, 7749), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (7747, 7749), False, 'import getpass\n'), ((7794, 7811), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (7809, 7811), False, 'import getpass\n'), ((8294, 8311), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (8309, 8311), False, 'import getpass\n'), ((8582, 8598), 'time.localtime', 'time.localtime', ([], {}), '()\n', (8596, 8598), False, 'import time\n'), ((8971, 9034), 'time.strptime', 'time.strptime', (["mail_content['Date']", '"""%a, %d %b %Y %H:%M:%S %z"""'], {}), "(mail_content['Date'], '%a, %d %b %Y %H:%M:%S %z')\n", (8984, 9034), False, 'import time\n'), ((10582, 10598), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10596, 10598), False, 'import time\n'), ((5228, 5244), 'time.localtime', 'time.localtime', ([], {}), '()\n', (5242, 5244), False, 'import time\n'), ((6874, 6890), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6888, 6890), False, 'import time\n'), ((10396, 10412), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10410, 10412), False, 'import time\n'), ((3851, 3867), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3865, 3867), False, 'import time\n'), ((4075, 4091), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4089, 4091), False, 'import time\n'), ((6054, 6070), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6068, 6070), False, 'import time\n'), ((6446, 6462), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6460, 6462), False, 'import time\n'), ((7078, 7094), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7092, 7094), False, 'import time\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(name="pyims",
version='0.1.2',
description='A python wrapper for the IMS Word Sense Disambiguation tool (Zhong and Ng, 2010)',
url='http://github.com/vishnumenon/pyims',
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'nltk',
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
zip_safe=False)
|
[
"setuptools.find_packages"
] |
[((457, 483), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (481, 483), False, 'import setuptools\n')]
|
# UnitTests of all triggmine events
import unittest
import datetime
from client import Client
class ClientTest(unittest.TestCase):
def setUp(self):
self.client = Client('YOUR API_URL', 'YOUR API_KEY')
# Registration event
def test_registration_success(self):
response = self.client.registration.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(201, response.status_code)
# Diagnostic event
def test_diagnostic_success(self):
response = self.client.diagnostic.create(date_created=str(datetime.datetime.now()),
diagnostic_type="Install_Test_Plugin", description="TestCms", status=1)
self.assertEqual(201, response.status_code)
# Cart event
def test_cart_success(self):
response = self.client.cart.create(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
# Login event
def test_login_success(self):
response = self.client.login.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(200, response.status_code)
# Logout event
def test_logout_success(self):
response = self.client.logout.create(device_id='4c3d48512d48b2603092b5a45ba74c8c',
device_id_1='465060737',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created=str(datetime.datetime.now()))
self.assertEqual(200, response.status_code)
# History event
def test_history_success(self):
response = self.client.history.create(orders=
[dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37")),
dict(order_id="22",price_total="210.86",qty_total="1",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))])
self.assertEqual(200, response.status_code)
# Navigation event
def test_navigation_success(self):
response = self.client.navigation.create(user_agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
# Order event
def test_order_success(self):
response = self.client.order.create(order_id="22",price_total="210.86",qty_total="1",status="Paid",
products=[dict(product_id= "421",
product_name= "Elizabeth Knit Top",
product_desc= "Loose fitting from the shoulders, open weave knit top. Semi sheer. Slips on. Faux button closure detail on the back. Linen/Cotton. Machine wash.",
product_sku= "wbk013",
product_image= "https://1924magento.triggmine.com.ua/media/catalog/product/cache/1/image/265x/9df78eab33525d08d6e5fb8d27136e95/w/b/wbk012t.jpg",
product_url= "https://1924magento.triggmine.com.ua/elizabeth-knit-top-596.html",
product_qty= 1,
product_price= 210,
product_total_val= 210,
product_categories= ['New Arrivals','Tops & Blouses'])],
customer=dict(device_id='4c3d48512d48b2603092b5a45ba74c8c',
customer_id='1',
customer_first_name='Jhon',
customer_last_name='Doe',
customer_email='<EMAIL>',
customer_date_created="2016-09-08 10:20:37"))
self.assertEqual(201, response.status_code)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"datetime.datetime.now",
"client.Client"
] |
[((11253, 11268), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11266, 11268), False, 'import unittest\n'), ((177, 215), 'client.Client', 'Client', (['"""YOUR API_URL"""', '"""YOUR API_KEY"""'], {}), "('YOUR API_URL', 'YOUR API_KEY')\n", (183, 215), False, 'from client import Client\n'), ((840, 863), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (861, 863), False, 'import datetime\n'), ((1051, 1074), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1072, 1074), False, 'import datetime\n'), ((3462, 3485), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3483, 3485), False, 'import datetime\n'), ((4111, 4134), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4132, 4134), False, 'import datetime\n')]
|
from flask_restful import Resource, reqparse
parser = reqparse.RequestParser()
parser.add_argument('command', required=True)
parser.add_argument('docker', required=True)
class Build(Resource):
def get(self):
return {'status': 'building'}
def post(self):
args = parser.parse_args()
print(args)
return {'status': 'started'}
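# Minimal wiring sketch (assumed, not part of the original file): registers the Build
# resource on a Flask app; the '/build' route is an illustrative choice.
if __name__ == "__main__":
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Build, "/build")
    app.run(debug=True)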
|
[
"flask_restful.reqparse.RequestParser"
] |
[((55, 79), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (77, 79), False, 'from flask_restful import Resource, reqparse\n')]
|
from pathlib import Path
from shutil import which
from subprocess import run, PIPE
import click
from .main import main, lprint
@main.command()
@click.pass_context
@click.argument('watcher')
def symlink(ctx, watcher):
"""Locally install a symlink to sera"""
if ctx.parent.params['watcher']:
click.echo("This command runs locally")
raise click.Abort
source = Path(which('sera'))
target = source.parent / watcher
if ctx.obj['verbosity']:
click.echo('Installing symlink at %s' % str(target))
out = run(
['ln', '-s', str(source), str(target)],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True)
return lprint(ctx, out)
|
[
"click.echo",
"click.argument",
"shutil.which"
] |
[((168, 193), 'click.argument', 'click.argument', (['"""watcher"""'], {}), "('watcher')\n", (182, 193), False, 'import click\n'), ((310, 349), 'click.echo', 'click.echo', (['"""This command runs locally"""'], {}), "('This command runs locally')\n", (320, 349), False, 'import click\n'), ((394, 407), 'shutil.which', 'which', (['"""sera"""'], {}), "('sera')\n", (399, 407), False, 'from shutil import which\n')]
|
from typing import Optional, Dict, List
import aiohttp
plate_to_version = {
'真1': 'maimai',
'真2': 'maimai PLUS',
'超': 'maimai GreeN',
'檄': 'maimai GreeN PLUS',
'橙': 'maimai ORANGE',
'暁': 'maimai ORANGE PLUS',
'晓': 'maimai ORANGE PLUS',
'桃': 'maimai PiNK',
'櫻': 'maimai PiNK PLUS',
'樱': 'maimai PiNK PLUS',
'紫': 'maimai MURASAKi',
'菫': 'maimai MURASAKi PLUS',
'堇': 'maimai MURASAKi PLUS',
'白': 'maimai MiLK',
'雪': 'MiLK PLUS',
'輝': 'maimai FiNALE',
'辉': 'maimai FiNALE',
'熊': 'maimai でらっくす',
'華': 'maimai でらっくす PLUS',
'华': 'maimai でらっくす PLUS',
'爽': 'maimai でらっくす Splash'
}
async def get_player_plate(payload: Dict):
async with aiohttp.request("POST", "https://www.diving-fish.com/api/maimaidxprober/query/plate", json=payload) as resp:
if resp.status == 400:
return None, 400
elif resp.status == 403:
return None, 403
plate_data = await resp.json()
return plate_data, 0
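# Call sketch (illustrative only): get_player_plate is a coroutine and must be awaited;
# the payload keys and values below are assumed example inputs, not a documented schema.
if __name__ == "__main__":
    import asyncio

    async def demo():
        plate_data, status = await get_player_plate({"username": "example", "version": "真1"})
        print(status, plate_data)

    asyncio.run(demo())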
|
[
"aiohttp.request"
] |
[((802, 905), 'aiohttp.request', 'aiohttp.request', (['"""POST"""', '"""https://www.diving-fish.com/api/maimaidxprober/query/plate"""'], {'json': 'payload'}), "('POST',\n 'https://www.diving-fish.com/api/maimaidxprober/query/plate', json=payload)\n", (817, 905), False, 'import aiohttp\n')]
|
import os
import re
import time
import numpy as np
from msedge.selenium_tools import EdgeOptions, Edge
from selenium.webdriver.common.action_chains import ActionChains
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.41'
}
print('load data...')
sha256set = np.loadtxt(os.getcwd() + "/Gorgon Group.csv", delimiter=",", usecols=(0), dtype=str, skiprows=1)  # usecols=(0): the hash value is in column 0; adjust this as needed.
print('finish data load...')
opt = EdgeOptions()  # use the Chromium-based Microsoft Edge browser; change this for other browsers if needed
opt.use_chromium = True
# opt.add_argument("headless") # 无头浏览器,如果运行出错请注释掉这句。
opt.add_argument("disable-gpu")
opt.add_experimental_option('excludeSwitches', ['enable-logging'])
driver = Edge(executable_path = os.getcwd() + "/msedgedriver.exe", options = opt)  # msedgedriver.exe must match the name of the downloaded webdriver; by default it is in the project root directory
for filehash in sha256set:
noerror = 1
while(noerror):
try:
fileurl = 'https://www.virustotal.com/gui/file/' + filehash + '/behavior/VirusTotal%20Cuckoofork'
driver.get(fileurl)
driver.implicitly_wait(7)
driver.find_element_by_tag_name('body')
time.sleep(1.5)
print(driver.current_url)
            if driver.current_url == "https://www.virustotal.com/gui/captcha":  # check whether the site blocked us; if blocked, solve the captcha manually within the time limit
                ActionChains(driver).move_by_offset(342, 146).click().perform()  # click automatically to open the captcha
                ActionChains(driver).move_by_offset(-342, -146).perform()
                time.sleep(90)  # wait for the captcha to be solved manually
matchresult = re.findall(r"file.(.*?).detection", driver.current_url, re.M)
            with open(os.getcwd() + '/sha256.txt', 'a+', encoding='UTF-8') as f:  # save the result to a file
f.write(matchresult[0] + '\n')
f.close()
noerror = 0
except:
noerror = 1
|
[
"msedge.selenium_tools.EdgeOptions",
"time.sleep",
"os.getcwd",
"selenium.webdriver.common.action_chains.ActionChains",
"re.findall"
] |
[((541, 554), 'msedge.selenium_tools.EdgeOptions', 'EdgeOptions', ([], {}), '()\n', (552, 554), False, 'from msedge.selenium_tools import EdgeOptions, Edge\n'), ((380, 391), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (389, 391), False, 'import os\n'), ((811, 822), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (820, 822), False, 'import os\n'), ((1232, 1247), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (1242, 1247), False, 'import time\n'), ((1630, 1690), 're.findall', 're.findall', (['"""file.(.*?).detection"""', 'driver.current_url', 're.M'], {}), "('file.(.*?).detection', driver.current_url, re.M)\n", (1640, 1690), False, 'import re\n'), ((1580, 1594), 'time.sleep', 'time.sleep', (['(90)'], {}), '(90)\n', (1590, 1594), False, 'import time\n'), ((1714, 1725), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1723, 1725), False, 'import os\n'), ((1506, 1526), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (1518, 1526), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((1411, 1431), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (1423, 1431), False, 'from selenium.webdriver.common.action_chains import ActionChains\n')]
|
import pixiedust
my_logger = pixiedust.getLogger(__name__)
|
[
"pixiedust.getLogger"
] |
[((29, 58), 'pixiedust.getLogger', 'pixiedust.getLogger', (['__name__'], {}), '(__name__)\n', (48, 58), False, 'import pixiedust\n')]
|
# Generated by Django 2.1.11 on 2020-06-04 09:19
from django.db import migrations, models
def fix_period_before_after(apps, schema_editor):
# noinspection PyPep8Naming
Form = apps.get_model("iaso", "Form")
for form in Form.objects.filter(period_type=None).exclude(periods_before_allowed=0, periods_after_allowed=0):
form.periods_before_allowed = 0
form.periods_after_allowed = 0
form.save()
class Migration(migrations.Migration):
dependencies = [("iaso", "0051_device_position")]
operations = [
migrations.AlterField(
model_name="form",
name="period_type",
field=models.TextField(
blank=True,
choices=[("MONTH", "Month"), ("QUARTER", "Quarter"), ("SIX_MONTH", "Six-month"), ("YEAR", "Year")],
null=True,
),
),
migrations.AlterField(model_name="form", name="periods_after_allowed", field=models.IntegerField(default=0)),
migrations.AlterField(model_name="form", name="periods_before_allowed", field=models.IntegerField(default=0)),
migrations.RunPython(fix_period_before_after, reverse_code=migrations.RunPython.noop),
]
|
[
"django.db.migrations.RunPython",
"django.db.models.TextField",
"django.db.models.IntegerField"
] |
[((1119, 1209), 'django.db.migrations.RunPython', 'migrations.RunPython', (['fix_period_before_after'], {'reverse_code': 'migrations.RunPython.noop'}), '(fix_period_before_after, reverse_code=migrations.\n RunPython.noop)\n', (1139, 1209), False, 'from django.db import migrations, models\n'), ((659, 802), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'choices': "[('MONTH', 'Month'), ('QUARTER', 'Quarter'), ('SIX_MONTH', 'Six-month'), (\n 'YEAR', 'Year')]", 'null': '(True)'}), "(blank=True, choices=[('MONTH', 'Month'), ('QUARTER',\n 'Quarter'), ('SIX_MONTH', 'Six-month'), ('YEAR', 'Year')], null=True)\n", (675, 802), False, 'from django.db import migrations, models\n'), ((959, 989), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (978, 989), False, 'from django.db import migrations, models\n'), ((1078, 1108), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1097, 1108), False, 'from django.db import migrations, models\n')]
|
from hca.dss import DSSClient
dss = DSSClient()
dss.logout()
|
[
"hca.dss.DSSClient"
] |
[((37, 48), 'hca.dss.DSSClient', 'DSSClient', ([], {}), '()\n', (46, 48), False, 'from hca.dss import DSSClient\n')]
|
import sys
import os
import os.path
import random
from pathlib import Path
import torch
import torchaudio
from .audiodataset import AUDIO_EXTENSIONS, default_loader
from ..dataset import PureDatasetFolder, has_file_allowed_extension
class TAU2019(PureDatasetFolder):
"""TAU urban acoustic scene 2019 dataset.
This dataset was used for DCASE 2019 Task 1.
For using this dataset, download the dataset from the following links:
https://zenodo.org/record/2589280#.XvWs0Zbgprk
https://zenodo.org/record/3063822#.XvWs55bgprk
Then, unzip them in the *root* folder.
"""
def __init__(self, root, mode, loader=default_loader, extensions=AUDIO_EXTENSIONS,
transforms=None, transform=None, target_transform=None,
is_valid_file=None,
pre_load=False, pre_transform=None,
pre_target_transform=None, pre_transforms=None):
super(TAU2019, self).__init__(root,
transforms=transforms,
transform=transform,
target_transform=target_transform)
self.MODES = ('train', 'evaluate', 'test')
if mode not in self.MODES:
raise ValueError("mode \"{}\" is not in {}".format(mode, self.MODES))
self.mode = mode
classes, class_to_idx = self._define_classes()
samples = self._make_dataset(str(self.root), mode,
class_to_idx, extensions, is_valid_file)
self.loader = loader
self.extensions = extensions
self.samples = samples
self.targets = [s[1] for s in samples]
self.classes = classes
self.class_to_idx = class_to_idx
has_pre_transforms = pre_transforms is not None
has_pre_separate_transform = pre_transform is not None or pre_target_transform is not None
if has_pre_transforms and has_pre_separate_transform:
raise ValueError("Only pre_transforms or pre_transform/pre_target_transform can "
"be passed as argument")
if has_pre_separate_transform:
pre_transforms = torchdataset.transform.SeparatedTransform(pre_transform, pre_target_transform)
self.pre_transforms = pre_transforms
self.pre_load = pre_load
if pre_load:
self.pre_process()
def pre_process(self, ):
preprocessed_samples = []
for i in range(len(self)):
sys.stdout.write("\rloaded {0} / {1}".format(i+1, len(self)))
sys.stdout.flush()
path, target = self.samples[i]
sample = self.loader(path)
if self.pre_transforms is not None:
sample, target = self.pre_transforms(sample, target)
preprocessed_samples.append((sample, target))
self.preprocessed_samples = preprocessed_samples
sys.stdout.write("\n")
def _define_classes(self, ):
classes = ['airport', 'shopping_mall', 'metro_station', 'street_pedestrian',
'public_square', 'street_traffic', 'tram', 'bus', 'metro', 'park']
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def _make_dataset(self, directory, mode, class_to_idx, extensions=None, is_valid_file=None):
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
if not os.path.isdir(directory):
raise ValueError("{} is not a directory".format(directory))
with open(os.path.join(directory, 'evaluation_setup', 'fold1_'+mode+'.csv')) as f:
for i, line in enumerate(f):
if i == 0:
continue
line = line.rstrip('\n')
fname = line.split('\t')[0]
path = os.path.join(directory, fname)
class_index = class_to_idx[os.path.split(fname)[1].split('-')[0]]
item = path, class_index
instances.append(item)
return instances
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
if self.pre_load:
sample, target = self.preprocessed_samples[index]
else:
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
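# Usage sketch (assumed, not part of the original file): presumes the two Zenodo
# archives above were unzipped under "./TAU-urban-acoustic-scenes-2019".
if __name__ == "__main__":
    train_set = TAU2019(root="./TAU-urban-acoustic-scenes-2019", mode="train")
    sample, target = train_set[0]                # (loaded audio, class index)
    print(len(train_set), train_set.classes[target])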
|
[
"os.path.join",
"os.path.split",
"os.path.isdir",
"sys.stdout.flush",
"os.path.expanduser",
"sys.stdout.write"
] |
[((2934, 2956), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2950, 2956), False, 'import sys\n'), ((3443, 3472), 'os.path.expanduser', 'os.path.expanduser', (['directory'], {}), '(directory)\n', (3461, 3472), False, 'import os\n'), ((2592, 2610), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2608, 2610), False, 'import sys\n'), ((3915, 3939), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (3928, 3939), False, 'import os\n'), ((4031, 4100), 'os.path.join', 'os.path.join', (['directory', '"""evaluation_setup"""', "('fold1_' + mode + '.csv')"], {}), "(directory, 'evaluation_setup', 'fold1_' + mode + '.csv')\n", (4043, 4100), False, 'import os\n'), ((4309, 4339), 'os.path.join', 'os.path.join', (['directory', 'fname'], {}), '(directory, fname)\n', (4321, 4339), False, 'import os\n'), ((4383, 4403), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (4396, 4403), False, 'import os\n')]
|
import os
import torch
import hashlib
from collections import OrderedDict
from util.env import env_factory, eval_policy
from util.logo import print_logo
if __name__ == "__main__":
import sys, argparse, time, os
parser = argparse.ArgumentParser()
parser.add_argument("--nolog", action='store_true')
print_logo(subtitle="Recurrent Reinforcement Learning for Robotics.")
if len(sys.argv) < 2:
print("Usage: python apex.py [algorithm name]", sys.argv)
elif sys.argv[1] == 'ars':
"""
Utility for running Augmented Random Search.
"""
from algos.ars import run_experiment
sys.argv.remove(sys.argv[1])
parser.add_argument("--workers", type=int, default=4)
parser.add_argument("--hidden_size", default=32, type=int) # neurons in hidden layer
parser.add_argument("--timesteps", "-t", default=1e8, type=float) # timesteps to run experiment ofr
parser.add_argument("--load_model", "-l", default=None, type=str) # load a model from a saved file.
parser.add_argument('--std', "-sd", default=0.0075, type=float) # the standard deviation of the parameter noise vectors
parser.add_argument("--deltas", "-d", default=64, type=int) # number of parameter noise vectors to use
parser.add_argument("--lr", "-lr", default=0.01, type=float) # the learning rate used to update policy
parser.add_argument("--reward_shift", "-rs", default=1, type=float) # the reward shift (to counter Gym's alive_bonus)
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--algo", "-a", default='v1', type=str) # whether to use ars v1 or v2
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--recurrent", "-r", action='store_true') # whether to use a recurrent policy
parser.add_argument("--logdir", default="./logs/ars/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
parser.add_argument("--average_every", default=10, type=int)
parser.add_argument("--save_model", "-m", default=None, type=str) # where to save the trained model to
parser.add_argument("--redis", default=None)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'ddpg':
sys.argv.remove(sys.argv[1])
"""
Utility for running Recurrent/Deep Deterministic Policy Gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=int) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument("--load_critic", default=None, type=str) # load a critic from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.2, type=float) # random noise used for exploration
parser.add_argument('--tau', default=0.01, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=1e-5, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--normc_init", default=True, type=bool) # using col norm to init weights
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--logdir", default="./logs/ddpg/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'ddpg'
run_experiment(args)
elif sys.argv[1] == 'td3':
sys.argv.remove(sys.argv[1])
"""
Utility for running Twin-Delayed Deep Deterministic policy gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=float) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.1, type=float) # random noise used for exploration
parser.add_argument('--max_action', default=1.0, type=float) #
parser.add_argument('--policy_noise', default=0.2, type=float) #
parser.add_argument('--noise_clip', default=0.5, type=float) #
parser.add_argument('--tau', default=0.005, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=3e-4, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=3e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--batch_size", default=256, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--update_freq", default=1, type=int) # how many episodes to skip before updating
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
#parser.add_argument("--save_critics", default=None, type=str)
parser.add_argument("--logdir", default="./logs/td3/", type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'td3'
run_experiment(args)
elif sys.argv[1] == 'ppo':
sys.argv.remove(sys.argv[1])
"""
Utility for running Proximal Policy Optimization.
"""
from algos.ppo import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--num_steps", default=5000, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--std', default=0.13, type=float) # the fixed exploration std
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--eps", "-ep", default=1e-5, type=float) # adam eps
parser.add_argument("--kl", default=0.02, type=float) # kl abort threshold
parser.add_argument("--entropy_coeff", default=0.0, type=float)
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--epochs", default=3, type=int) # number of updates per iter
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/ppo/", type=str)
parser.add_argument("--workers", default=4, type=int)
parser.add_argument("--redis", default=None, type=str)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'sac':
sys.argv.remove(sys.argv[1])
"""
Utility for running Soft Actor-Critic.
"""
from algos.off_policy import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--start_timesteps", default=10000, type=int)
parser.add_argument("--eval_every", default=100, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--tau', default=1e-2, type=float)
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--alpha", default=None, type=float) # adam learning rate for critic
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=128, type=int) # batch size for policy update
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/sac/", type=str)
args = parser.parse_args()
args.algo = 'sac'
run_experiment(args)
elif sys.argv[1] == 'eval':
sys.argv.remove(sys.argv[1])
parser.add_argument("--policy", default="./trained_models/ddpg/ddpg_actor.pt", type=str)
parser.add_argument("--env_name", default=None, type=str)
parser.add_argument("--traj_len", default=400, type=int)
args = parser.parse_args()
policy = torch.load(args.policy)
eval_policy(policy, min_timesteps=100000, env_name=args.env_name, max_traj_len=args.traj_len)
elif sys.argv[1] == 'cassie':
sys.argv.remove(sys.argv[1])
from cassie.udp import run_udp
parser.add_argument("--policy", default='logs/ppo/Cassie-nodelta-stateest-clockbased/bcbc77-seed0/actor.pt', type=str)
args = parser.parse_args()
run_udp(args)
else:
print("Invalid option '{}'".format(sys.argv[1]))
|
[
"argparse.ArgumentParser",
"util.env.eval_policy",
"torch.load",
"sys.argv.remove",
"algos.off_policy.run_experiment",
"util.logo.print_logo",
"cassie.udp.run_udp"
] |
[((227, 252), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (250, 252), False, 'import sys, argparse, time, os\n'), ((315, 384), 'util.logo.print_logo', 'print_logo', ([], {'subtitle': '"""Recurrent Reinforcement Learning for Robotics."""'}), "(subtitle='Recurrent Reinforcement Learning for Robotics.')\n", (325, 384), False, 'from util.logo import print_logo\n'), ((615, 643), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (630, 643), False, 'import sys, argparse, time, os\n'), ((2610, 2630), 'algos.off_policy.run_experiment', 'run_experiment', (['args'], {}), '(args)\n', (2624, 2630), False, 'from algos.off_policy import run_experiment\n'), ((2666, 2694), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2681, 2694), False, 'import sys, argparse, time, os\n'), ((5336, 5356), 'algos.off_policy.run_experiment', 'run_experiment', (['args'], {}), '(args)\n', (5350, 5356), False, 'from algos.off_policy import run_experiment\n'), ((5392, 5420), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (5407, 5420), False, 'import sys, argparse, time, os\n'), ((8114, 8134), 'algos.off_policy.run_experiment', 'run_experiment', (['args'], {}), '(args)\n', (8128, 8134), False, 'from algos.off_policy import run_experiment\n'), ((8169, 8197), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (8184, 8197), False, 'import sys, argparse, time, os\n'), ((10614, 10634), 'algos.off_policy.run_experiment', 'run_experiment', (['args'], {}), '(args)\n', (10628, 10634), False, 'from algos.off_policy import run_experiment\n'), ((10669, 10697), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (10684, 10697), False, 'import sys, argparse, time, os\n'), ((12724, 12744), 'algos.off_policy.run_experiment', 'run_experiment', (['args'], {}), '(args)\n', (12738, 12744), False, 'from algos.off_policy import run_experiment\n'), ((12780, 12808), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (12795, 12808), False, 'import sys, argparse, time, os\n'), ((13071, 13094), 'torch.load', 'torch.load', (['args.policy'], {}), '(args.policy)\n', (13081, 13094), False, 'import torch\n'), ((13100, 13197), 'util.env.eval_policy', 'eval_policy', (['policy'], {'min_timesteps': '(100000)', 'env_name': 'args.env_name', 'max_traj_len': 'args.traj_len'}), '(policy, min_timesteps=100000, env_name=args.env_name,\n max_traj_len=args.traj_len)\n', (13111, 13197), False, 'from util.env import env_factory, eval_policy\n'), ((13231, 13259), 'sys.argv.remove', 'sys.argv.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (13246, 13259), False, 'import sys, argparse, time, os\n'), ((13455, 13468), 'cassie.udp.run_udp', 'run_udp', (['args'], {}), '(args)\n', (13462, 13468), False, 'from cassie.udp import run_udp\n')]
|
import read_data as RD
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
X = RD.read_data()
print('X = ',X.shape)
X_mean = np.reshape(np.sum(X,1)/X.shape[1],[ X.shape[0],1])
X = X-X_mean
print('X_centerred = ',X.shape)
[U,S,V] = np.linalg.svd(X, full_matrices=False)
print('U = ',U.shape)
print('S = ',S.shape)
print('V = ',V.shape)
N = 12#number of eigen images
Eig_im = U[:,0:N]
plt.figure(figsize=(10,10))
for i in range(0,N):
plt.subplot(int(np.sqrt(N)),int(np.ceil(N/int(np.sqrt(N)))),i+1)
im = np.reshape(Eig_im[:,i],[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('Eigen Image = '+str(i+1))
plt.savefig('Eigen_Images.png')
plt.savefig('Eigen_Images.tif')
Y = np.matmul(np.transpose(U),X)
print('Y = ',Y.shape)
plt.figure(figsize=(10,10))
Np = 10#Number of projection coefficients to plot
Ni = 4#Number of images
images = ['a','b','c','d']
for i in range(0,Ni):
plt.plot(np.arange(1,Np+1),Y[0:Np,i],label='Image = '+images[i])
plt.xlabel('Eigenvectors',fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.ylabel('Magnitude of the projection coefficient',fontsize=20)
plt.yticks(weight = 'bold',fontsize=15)
plt.legend(fontsize=20)
plt.savefig('Projection_Coefficients.png')
plt.savefig('Projection_Coefficients.tif')
#Image synthesis
ind = 0#index of the image to synthesize
m = [1, 5, 10, 15, 20, 30]
plt.figure(figsize=(10,15))
for i in range(0,len(m)):
X_hat = np.reshape(np.matmul(U[:,0:m[i]],Y[0:m[i],ind]),[X.shape[0],1])
print(X_hat.shape)
print(X_mean.shape)
X_hat += X_mean
plt.subplot(3,2,i+1)
im = np.reshape(X_hat,[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('m = '+str(m[i]),fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.yticks(weight = 'bold',fontsize=15)
#img_out = Image.fromarray(im.astype(np.uint8))
#img_out.save('Im_reconstruction_'+str(m[i])+'.tif')
plt.savefig('Im_reconstruction.png')
plt.savefig('Im_reconstruction.tif')
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"numpy.sqrt",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure",
"read_data.read_data",
"matplotlib.pyplot.yticks",
"numpy.matmul",
"numpy.linalg.svd",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] |
[((101, 115), 'read_data.read_data', 'RD.read_data', ([], {}), '()\n', (113, 115), True, 'import read_data as RD\n'), ((253, 290), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {'full_matrices': '(False)'}), '(X, full_matrices=False)\n', (266, 290), True, 'import numpy as np\n'), ((406, 434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (416, 434), True, 'import matplotlib.pyplot as plt\n'), ((653, 684), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Eigen_Images.png"""'], {}), "('Eigen_Images.png')\n", (664, 684), True, 'import matplotlib.pyplot as plt\n'), ((685, 716), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Eigen_Images.tif"""'], {}), "('Eigen_Images.tif')\n", (696, 716), True, 'import matplotlib.pyplot as plt\n'), ((773, 801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (783, 801), True, 'import matplotlib.pyplot as plt\n'), ((990, 1029), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Eigenvectors"""'], {'fontsize': '(20)'}), "('Eigenvectors', fontsize=20)\n", (1000, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1029, 1067), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1039, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1135), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude of the projection coefficient"""'], {'fontsize': '(20)'}), "('Magnitude of the projection coefficient', fontsize=20)\n", (1079, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1173), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1145, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1198), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1185, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1241), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Projection_Coefficients.png"""'], {}), "('Projection_Coefficients.png')\n", (1210, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1284), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Projection_Coefficients.tif"""'], {}), "('Projection_Coefficients.tif')\n", (1253, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1371, 1399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (1381, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Im_reconstruction.png"""'], {}), "('Im_reconstruction.png')\n", (1902, 1927), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Im_reconstruction.tif"""'], {}), "('Im_reconstruction.tif')\n", (1939, 1964), True, 'import matplotlib.pyplot as plt\n'), ((527, 561), 'numpy.reshape', 'np.reshape', (['Eig_im[:, i]', '[64, 64]'], {}), '(Eig_im[:, i], [64, 64])\n', (537, 561), True, 'import numpy as np\n'), ((560, 614), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': 'plt.cm.gray', 'interpolation': '"""none"""'}), "(im, cmap=plt.cm.gray, interpolation='none')\n", (570, 614), True, 'import matplotlib.pyplot as plt\n'), ((732, 747), 'numpy.transpose', 'np.transpose', (['U'], {}), '(U)\n', (744, 747), True, 'import numpy as np\n'), ((1557, 1581), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(i + 1)'], {}), '(3, 2, i + 1)\n', (1568, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1611), 'numpy.reshape', 'np.reshape', (['X_hat', '[64, 64]'], {}), '(X_hat, [64, 64])\n', (1594, 1611), True, 'import numpy as np\n'), ((1611, 1665), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': 'plt.cm.gray', 'interpolation': '"""none"""'}), "(im, cmap=plt.cm.gray, interpolation='none')\n", (1621, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1745), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1717, 1745), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1786), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1758, 1786), True, 'import matplotlib.pyplot as plt\n'), ((158, 170), 'numpy.sum', 'np.sum', (['X', '(1)'], {}), '(X, 1)\n', (164, 170), True, 'import numpy as np\n'), ((934, 954), 'numpy.arange', 'np.arange', (['(1)', '(Np + 1)'], {}), '(1, Np + 1)\n', (943, 954), True, 'import numpy as np\n'), ((1445, 1484), 'numpy.matmul', 'np.matmul', (['U[:, 0:m[i]]', 'Y[0:m[i], ind]'], {}), '(U[:, 0:m[i]], Y[0:m[i], ind])\n', (1454, 1484), True, 'import numpy as np\n'), ((472, 482), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (479, 482), True, 'import numpy as np\n'), ((502, 512), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (509, 512), True, 'import numpy as np\n')]
|
from django.db import models
from manager_utils import ManagerUtilsQuerySet, ManagerUtilsManager
from activatable_model.signals import model_activations_changed
class ActivatableQuerySet(ManagerUtilsQuerySet):
"""
Provides bulk activation/deactivation methods.
"""
def update(self, *args, **kwargs):
if self.model.ACTIVATABLE_FIELD_NAME in kwargs:
# Fetch the instances that are about to be updated if they have an activatable flag. This
# is because their activatable flag may be changed in the subsequent update, causing us
# to potentially lose what this original query referenced
updated_instance_ids = list(self.values_list('id', flat=True))
ret_val = super(ActivatableQuerySet, self).update(*args, **kwargs)
if self.model.ACTIVATABLE_FIELD_NAME in kwargs and updated_instance_ids:
# Refetch the instances that were updated and send them to the activation signal
model_activations_changed.send(
self.model, instance_ids=updated_instance_ids,
is_active=kwargs[self.model.ACTIVATABLE_FIELD_NAME])
return ret_val
def activate(self):
return self.update(**{
self.model.ACTIVATABLE_FIELD_NAME: True
})
def deactivate(self):
return self.update(**{
self.model.ACTIVATABLE_FIELD_NAME: False
})
def delete(self, force=False):
return super(ActivatableQuerySet, self).delete() if force else self.deactivate()
class ActivatableManager(ManagerUtilsManager):
def get_queryset(self):
return ActivatableQuerySet(self.model)
def activate(self):
return self.get_queryset().activate()
def deactivate(self):
return self.get_queryset().deactivate()
class BaseActivatableModel(models.Model):
"""
Adds an is_active flag and processes information about when an is_active flag is changed.
"""
class Meta:
abstract = True
    # The name of the Boolean field that determines if this model is active or inactive. A field
    # with this name must be defined on the concrete model, and it must be a BooleanField. The field
    # is not declared here because doing so would prevent the user from easily choosing its default
    # value and whether it is indexed.
ACTIVATABLE_FIELD_NAME = 'is_active'
objects = ActivatableManager()
# The original activatable field value, for determining when it changes
__original_activatable_value = None
def __init__(self, *args, **kwargs):
super(BaseActivatableModel, self).__init__(*args, **kwargs)
# Keep track of the original activatable value to know when it changes
self.__original_activatable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
def save(self, *args, **kwargs):
"""
        A custom save method that detects when the model is activated or deactivated.
"""
current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value
self.__original_activatable_value = current_activable_value
ret_val = super(BaseActivatableModel, self).save(*args, **kwargs)
# Emit the signal for when the is_active flag is changed
if is_active_changed:
model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)
return ret_val
def delete(self, force=False, **kwargs):
"""
It is impossible to delete an activatable model unless force is True. This function instead sets it to inactive.
"""
if force:
return super(BaseActivatableModel, self).delete(**kwargs)
else:
setattr(self, self.ACTIVATABLE_FIELD_NAME, False)
return self.save(update_fields=[self.ACTIVATABLE_FIELD_NAME])
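# Hedged usage sketch (not part of this module): a concrete model declares the Boolean
# field named by ACTIVATABLE_FIELD_NAME itself; the model name "Article" and the field
# options below are illustrative assumptions, not the library's own code.
#
#     class Article(BaseActivatableModel):
#         is_active = models.BooleanField(default=True, db_index=True)
#         title = models.CharField(max_length=128)
#
#     # Bulk-deactivating through the manager emits model_activations_changed:
#     Article.objects.filter(title__startswith='Old').deactivate()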
|
[
"activatable_model.signals.model_activations_changed.send"
] |
[((986, 1121), 'activatable_model.signals.model_activations_changed.send', 'model_activations_changed.send', (['self.model'], {'instance_ids': 'updated_instance_ids', 'is_active': 'kwargs[self.model.ACTIVATABLE_FIELD_NAME]'}), '(self.model, instance_ids=\n updated_instance_ids, is_active=kwargs[self.model.ACTIVATABLE_FIELD_NAME])\n', (1016, 1121), False, 'from activatable_model.signals import model_activations_changed\n'), ((3427, 3536), 'activatable_model.signals.model_activations_changed.send', 'model_activations_changed.send', (['self.__class__'], {'instance_ids': '[self.id]', 'is_active': 'current_activable_value'}), '(self.__class__, instance_ids=[self.id],\n is_active=current_activable_value)\n', (3457, 3536), False, 'from activatable_model.signals import model_activations_changed\n')]
|
# my_lambdata/my_mod.py
# my_lambdata.my_mod
import pandas as pd
def enlarge(num):
return num * 100
def null_check(df):
null_lines = df[df.isnull().any(axis=1)]
return null_lines
def date_divider(df,date_col):
'''
    df: the dataframe to which new Day, Month and Year columns will be added
date_col: the name of the column the date is stored in
'''
converted_df = df.copy()
converted_df["Year"] = pd.DatetimeIndex(converted_df[date_col]).year
converted_df["Month"] = pd.DatetimeIndex(converted_df[date_col]).month
converted_df["Day"] = pd.DatetimeIndex(converted_df[date_col]).day
return converted_df
if __name__ == "__main__":
x = 11
print(enlarge(x))
y = int(input("Please choose a number (e.g. 5)"))
print(enlarge(y))
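    # Hedged usage sketch for date_divider; the column name "created_at" and the
    # sample dates below are illustrative assumptions, not part of the original demo.
    demo_df = pd.DataFrame({"created_at": ["2020-01-15", "2021-07-04"]})
    print(date_divider(demo_df, "created_at"))  # adds Year, Month and Day columns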
|
[
"pandas.DatetimeIndex"
] |
[((409, 449), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['converted_df[date_col]'], {}), '(converted_df[date_col])\n', (425, 449), True, 'import pandas as pd\n'), ((482, 522), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['converted_df[date_col]'], {}), '(converted_df[date_col])\n', (498, 522), True, 'import pandas as pd\n'), ((554, 594), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['converted_df[date_col]'], {}), '(converted_df[date_col])\n', (570, 594), True, 'import pandas as pd\n')]
|
import sys
sys.path.append('./')
import os
import pandas as pd
from vtkplotter import load
from brainrender import DEFAULT_STRUCTURE_COLOR
def get_rat_regions_metadata(metadata_fld):
"""
    :param metadata_fld: path to the folder containing rat_structures.pkl
"""
return pd.read_pickle(os.path.join(metadata_fld, "rat_structures.pkl"))
def get_rat_mesh_from_region(region, paths, use_original_color=False, **kwargs):
"""
    :param region: a structure Id (int) or Name (str), or a list of them
    :param paths: object exposing the metadata and rat_meshes folders
    :param use_original_color:  (Default value = False)
    :param **kwargs: extra keyword arguments passed to vtkplotter's load; 'color'/'c' override the mesh color
"""
if not isinstance(region, (tuple, list)):
region = [region]
check = False
else: check = True
metadata = get_rat_regions_metadata(paths.metadata)
meshes = []
for reg in region:
if isinstance(reg, int):
entry = metadata.loc[metadata.Id == reg]
elif isinstance(reg, str):
entry = metadata.loc[metadata['Name'] == reg]
else:
raise ValueError("Unrecognized value for region while trying to get mesh for rat: {}".format(reg))
try:
meshname = os.path.join(paths.rat_meshes, "label_{}.stl".format(entry.Id.values[0]))
if not os.path.isfile(meshname):
raise FileExistsError(meshname)
if use_original_color:
c = entry["rgb"].values[0]
if isinstance(c, str):
c = c.replace("[", "")
c = c.replace("]", "")
cols = c.split(",")
color = [int(c) for c in cols]
else:
color = c
else:
if "color" in list(kwargs.keys()):
color = kwargs.pop("color", DEFAULT_STRUCTURE_COLOR)
elif "c" in list(kwargs.keys()):
color = kwargs.pop("c", DEFAULT_STRUCTURE_COLOR)
if "color" in list(kwargs.keys()): del kwargs["color"]
elif "c" in list(kwargs.keys()): del kwargs["c"]
mesh = load(meshname, c=color, **kwargs)
mesh = mesh.smoothLaplacian().subdivide(2)
meshes.append(mesh)
except:
print("Could not load rat region: {}".format(entry["Name"].values[0]))
return None
if not check:
return meshes[0]
else:
return meshes
if __name__ == "__main__":
pass
#fix_data() ## UNDEFINED!!??
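    # Hedged call sketch (not runnable as-is): it assumes a brainrender paths object
    # exposing .metadata and .rat_meshes, and "CA1" is only an example region name.
    # mesh = get_rat_mesh_from_region("CA1", paths, use_original_color=True)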
|
[
"os.path.join",
"sys.path.append",
"vtkplotter.load",
"os.path.isfile"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((257, 305), 'os.path.join', 'os.path.join', (['metadata_fld', '"""rat_structures.pkl"""'], {}), "(metadata_fld, 'rat_structures.pkl')\n", (269, 305), False, 'import os\n'), ((2015, 2048), 'vtkplotter.load', 'load', (['meshname'], {'c': 'color'}), '(meshname, c=color, **kwargs)\n', (2019, 2048), False, 'from vtkplotter import load\n'), ((1185, 1209), 'os.path.isfile', 'os.path.isfile', (['meshname'], {}), '(meshname)\n', (1199, 1209), False, 'import os\n')]
|
import pandas as pd
from melusine.prepare_email.mail_segmenting import structure_email, tag_signature
structured_historic = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<<EMAIL>> a écrit\xa0:",
},
]
output = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <<EMAIL>> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers \
PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_structure_email():
input_df = pd.DataFrame({"structured_historic": [structured_historic]})
output_df = pd.Series([output])
result = input_df.apply(structure_email, axis=1)
pd.testing.assert_series_equal(result, output_df)
structured_historic_signature = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée\n<NAME>",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<<EMAIL>> a écrit\xa0:",
},
]
output_signature = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
{"part": "<NAME>", "tags": "SIGNATURE"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <<EMAIL>> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_tag_signature():
input_df = pd.DataFrame({"structured_historic": [structured_historic_signature]})
output_df = pd.Series([output_signature])
input_df["structured_body"] = input_df.apply(structure_email, axis=1)
result = input_df.apply(tag_signature, axis=1)
pd.testing.assert_series_equal(result, output_df)
|
[
"pandas.DataFrame",
"pandas.Series",
"pandas.testing.assert_series_equal"
] |
[((2304, 2364), 'pandas.DataFrame', 'pd.DataFrame', (["{'structured_historic': [structured_historic]}"], {}), "({'structured_historic': [structured_historic]})\n", (2316, 2364), True, 'import pandas as pd\n'), ((2384, 2403), 'pandas.Series', 'pd.Series', (['[output]'], {}), '([output])\n', (2393, 2403), True, 'import pandas as pd\n'), ((2465, 2514), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'output_df'], {}), '(result, output_df)\n', (2495, 2514), True, 'import pandas as pd\n'), ((4799, 4869), 'pandas.DataFrame', 'pd.DataFrame', (["{'structured_historic': [structured_historic_signature]}"], {}), "({'structured_historic': [structured_historic_signature]})\n", (4811, 4869), True, 'import pandas as pd\n'), ((4889, 4918), 'pandas.Series', 'pd.Series', (['[output_signature]'], {}), '([output_signature])\n', (4898, 4918), True, 'import pandas as pd\n'), ((5051, 5100), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'output_df'], {}), '(result, output_df)\n', (5081, 5100), True, 'import pandas as pd\n')]
|
"""
===================================
Merging two instances in the design
===================================
This example demonstrates how to merge two instances in the design to create a new
merged definition
.. hdl-diagram:: ../../../examples/basic/_initial_design_merge.v
:type: netlistsvg
:align: center
:module: top
**Output1** Merged design Instance
.. hdl-diagram:: ../../../examples/basic/_merged_design.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
import logging
logger = logging.getLogger('spydrnet_logs')
sdn.enable_file_logging(LOG_LEVEL='INFO')
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
sdn.compose(netlist, '_initial_design_merge.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
top = netlist.top_instance.reference
inst1 = next(top.get_instances("inst_1_0"))
inst2 = next(top.get_instances("inst_1_1"))
top.merge_instance([inst1, inst2],
new_definition_name="merged_module",
new_instance_name="merged_module_instance_0")
top.create_unconn_wires()
sdn.compose(netlist, '_merged_design.v', skip_constraints=True)
|
[
"logging.getLogger",
"spydrnet.compose",
"spydrnet_physical.load_netlist_by_name",
"spydrnet.enable_file_logging"
] |
[((594, 628), 'logging.getLogger', 'logging.getLogger', (['"""spydrnet_logs"""'], {}), "('spydrnet_logs')\n", (611, 628), False, 'import logging\n'), ((629, 670), 'spydrnet.enable_file_logging', 'sdn.enable_file_logging', ([], {'LOG_LEVEL': '"""INFO"""'}), "(LOG_LEVEL='INFO')\n", (652, 670), True, 'import spydrnet as sdn\n'), ((682, 729), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', (['"""nested_hierarchy"""'], {}), "('nested_hierarchy')\n", (709, 729), True, 'import spydrnet_physical as sdnphy\n'), ((730, 800), 'spydrnet.compose', 'sdn.compose', (['netlist', '"""_initial_design_merge.v"""'], {'skip_constraints': '(True)'}), "(netlist, '_initial_design_merge.v', skip_constraints=True)\n", (741, 800), True, 'import spydrnet as sdn\n'), ((812, 859), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', (['"""nested_hierarchy"""'], {}), "('nested_hierarchy')\n", (839, 859), True, 'import spydrnet_physical as sdnphy\n'), ((1169, 1232), 'spydrnet.compose', 'sdn.compose', (['netlist', '"""_merged_design.v"""'], {'skip_constraints': '(True)'}), "(netlist, '_merged_design.v', skip_constraints=True)\n", (1180, 1232), True, 'import spydrnet as sdn\n')]
|
import praw
import re
import os
reddit = praw.Reddit('Splunge Bot v1', client_id=os.environ['REDDIT_CLIENT_ID'], client_secret=os.environ['REDDIT_CLIENT_SECRET'], password=os.environ['REDDIT_PASSWORD'], username=os.environ['REDDIT_USERNAME'])
subreddit = reddit.subreddit('tubasaur')
for submission in subreddit.new(limit=5):
for top_level_comment in submission.comments:
if re.search('splunge', top_level_comment.body, re.IGNORECASE):
top_level_comment.reply("Well, yeah, splunge for me too!")
print("Splunged.")
|
[
"praw.Reddit",
"re.search"
] |
[((42, 252), 'praw.Reddit', 'praw.Reddit', (['"""Splunge Bot v1"""'], {'client_id': "os.environ['REDDIT_CLIENT_ID']", 'client_secret': "os.environ['REDDIT_CLIENT_SECRET']", 'password': "os.environ['REDDIT_PASSWORD']", 'username': "os.environ['REDDIT_USERNAME']"}), "('Splunge Bot v1', client_id=os.environ['REDDIT_CLIENT_ID'],\n client_secret=os.environ['REDDIT_CLIENT_SECRET'], password=os.environ[\n 'REDDIT_PASSWORD'], username=os.environ['REDDIT_USERNAME'])\n", (53, 252), False, 'import praw\n'), ((379, 438), 're.search', 're.search', (['"""splunge"""', 'top_level_comment.body', 're.IGNORECASE'], {}), "('splunge', top_level_comment.body, re.IGNORECASE)\n", (388, 438), False, 'import re\n')]
|
import discord
from discord import embeds
from discord.ext import commands
from discord.ext.commands.core import command
from pymongo import MongoClient, collation
from discord_components import Button, Select, SelectOption, ComponentsBot
from discord.utils import get
class managecommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Enable/disable command
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
if role.id in settings[command]['guild']:
settings[command]['guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['disabled_category'].keys():
settings[command]['disabled_category'][str(category.id)] = [
role.id]
else:
if role.id in settings[command]['disabled_category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_category'][str(
category.id)].append(role.id)
if str(category.id) in settings[command]['category'].keys():
if role.id in settings[command]['category'][str(category.id)]:
settings[command]['category'][str(category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in category " + category.name+" for "+role.name + category.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['disabled_channel'].keys():
settings[command]['disabled_channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_channel'][str(
channel.id)].append(role.id)
if str(channel.id) in settings[command]['channel'].keys():
if role.id in settings[command]['channel'][str(channel.id)]:
settings[command]['channel'][str(channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in channel " + channel.name+" for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['guild']:
settings[command]['guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already enabled", color=0xFD3333))
return
if role.id in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['category'].keys():
settings[command]['category'][str(category.id)] = [role.id]
else:
if role.id in settings[command]['category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['category'][str(category.id)].append(role.id)
if str(category.id) in settings[command]['disabled_category'].keys():
if role.id in settings[command]['disabled_category'][str(category.id)]:
settings[command]['disabled_category'][str(
category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in category " + category.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['channel'].keys():
settings[command]['channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['channel'][str(channel.id)].append(role.id)
if str(channel.id) in settings[command]['disabled_channel'].keys():
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
settings[command]['disabled_channel'][str(
channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in channel " + channel.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def resetperms(self, ctx, command: str = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}}
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Reset command permissions", color=0x00FF42))
@commands.command(pass_context=True)
async def showperms(self, ctx):
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
options=[]
for setting in settings.keys():
options.append(SelectOption(label=setting, value=setting))
message = await ctx.reply("The lower in the hiearchy will go over the other. So channel enable will go over guild disable.", components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
while True:
interaction = await self.bot.wait_for("select_option")
embed = discord.Embed(name="Command permissions for ", value=interaction.values[0], color=0xFFFFFF)
if len(settings[interaction.values[0]]["guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide allowed", value=msg)
            if len(settings[interaction.values[0]]["disabled_guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["disabled_guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide denied", value=msg)
            # Unlike the guild-wide settings, "category" (and "channel") is a dict
            # keyed by id, mapping each category/channel id to a list of role ids.
embed.add_field(name="Category wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["category"].keys()) > 0:
for key in settings[interaction.values[0]]["category"].keys():
if len(settings[interaction.values[0]]["category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Category wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_category"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_category"].keys():
if len(settings[interaction.values[0]]["disabled_category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["channel"].keys()) > 0:
for key in settings[interaction.values[0]]["channel"].keys():
if len(settings[interaction.values[0]]["channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_channel"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_channel"].keys():
if len(settings[interaction.values[0]]["disabled_channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "There "
await message.edit(embed=embed,components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
def setup(bot):
bot.add_cog(managecommands(bot))
def perms(context):
command = context.command.name #str
guild_id = context.guild.id
channel_id = str(context.message.channel.id)
category_id = str(context.message.channel.category_id)
roles = []
for role in context.author.roles:
roles.append(role.id)
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": guild_id}
settings = collection.find_one(myquery)["settings"]
if command in settings.keys():
if channel_id in settings[command]["channel"].keys():
print("channels exist")
if bool(set(roles) & set(settings[command]["channel"][channel_id])):
return True
elif channel_id in settings[command]["disabled_channel"].keys():
if bool(set(roles) & set(settings[command]["disabled_channel"][channel_id])):
return False
elif category_id in settings[command]["category"].keys():
if bool(set(roles) & set(settings[command]["category"][category_id])):
return True
elif category_id in settings[command]["disabled_category"].keys():
if bool(set(roles) & set(settings[command]["disabled_category"][category_id])):
return False
elif bool(set(roles) & set(settings[command]["disabled_guild"])):
return False
elif bool(set(roles) & set(settings[command]["guild"])):
return True
return True
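# Hedged worked example of the precedence perms() implements (channel entries are
# checked before category entries, which are checked before guild-wide entries);
# the ids and command name below are made up. With
#   settings["ping"] = {"disabled_guild": [1234], "channel": {"5678": [1234]}, ...}
# a member holding role 1234 can still use "ping" inside channel 5678, because the
# channel-level allow is evaluated before the guild-wide deny ever applies.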
|
[
"discord.ext.commands.has_permissions",
"discord_components.SelectOption",
"discord.utils.get",
"discord_components.Select",
"pymongo.MongoClient",
"discord.Embed",
"discord.ext.commands.command"
] |
[((395, 430), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (411, 430), False, 'from discord.ext import commands\n'), ((436, 479), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (460, 479), False, 'from discord.ext import commands\n'), ((2001, 2036), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (2017, 2036), False, 'from discord.ext import commands\n'), ((2042, 2085), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (2066, 2085), False, 'from discord.ext import commands\n'), ((4090, 4125), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (4106, 4125), False, 'from discord.ext import commands\n'), ((4131, 4174), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (4155, 4174), False, 'from discord.ext import commands\n'), ((6124, 6159), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (6140, 6159), False, 'from discord.ext import commands\n'), ((6165, 6208), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (6189, 6208), False, 'from discord.ext import commands\n'), ((7728, 7763), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (7744, 7763), False, 'from discord.ext import commands\n'), ((7769, 7812), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (7793, 7812), False, 'from discord.ext import commands\n'), ((9775, 9810), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (9791, 9810), False, 'from discord.ext import commands\n'), ((9816, 9859), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (9840, 9859), False, 'from discord.ext import commands\n'), ((11804, 11839), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (11820, 11839), False, 'from discord.ext import commands\n'), ((11845, 11888), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (11869, 11888), False, 'from discord.ext import commands\n'), ((12860, 12895), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12876, 12895), False, 'from discord.ext import commands\n'), ((13567, 13662), 'discord.Embed', 'discord.Embed', ([], {'name': '"""Command permissions for """', 'value': 'interaction.values[0]', 'color': '(16777215)'}), "(name='Command permissions for ', value=interaction.values[0],\n color=16777215)\n", (13580, 13662), False, 'import discord\n'), ((18224, 18255), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (18235, 18255), False, 'from pymongo import MongoClient, collation\n'), ((959, 990), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (970, 990), False, 'from pymongo import MongoClient, 
collation\n'), ((2616, 2647), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (2627, 2647), False, 'from pymongo import MongoClient, collation\n'), ((4699, 4730), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (4710, 4730), False, 'from pymongo import MongoClient, collation\n'), ((6687, 6718), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (6698, 6718), False, 'from pymongo import MongoClient, collation\n'), ((8342, 8373), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (8353, 8373), False, 'from pymongo import MongoClient, collation\n'), ((10383, 10414), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (10394, 10414), False, 'from pymongo import MongoClient, collation\n'), ((12277, 12308), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (12288, 12308), False, 'from pymongo import MongoClient, collation\n'), ((12953, 12984), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (12964, 12984), False, 'from pymongo import MongoClient, collation\n'), ((13184, 13226), 'discord_components.SelectOption', 'SelectOption', ([], {'label': 'setting', 'value': 'setting'}), '(label=setting, value=setting)\n', (13196, 13226), False, 'from discord_components import Button, Select, SelectOption, ComponentsBot\n'), ((1909, 2000), 'discord.Embed', 'discord.Embed', ([], {'title': "('Disabled ' + command + ' on server for ' + role.name)", 'color': '(65346)'}), "(title='Disabled ' + command + ' on server for ' + role.name,\n color=65346)\n", (1922, 2000), False, 'import discord\n'), ((3960, 4091), 'discord.Embed', 'discord.Embed', ([], {'title': "('Disabled ' + command + ' in category ' + category.name + ' for ' + role.\n name + category.name)", 'color': '(65346)'}), "(title='Disabled ' + command + ' in category ' + category.name +\n ' for ' + role.name + category.name, color=65346)\n", (3973, 4091), False, 'import discord\n'), ((6012, 6125), 'discord.Embed', 'discord.Embed', ([], {'title': "('Disabled ' + command + ' in channel ' + channel.name + ' for ' + role.name)", 'color': '(65346)'}), "(title='Disabled ' + command + ' in channel ' + channel.name +\n ' for ' + role.name, color=65346)\n", (6025, 6125), False, 'import discord\n'), ((7637, 7727), 'discord.Embed', 'discord.Embed', ([], {'title': "('Enabled ' + command + ' on server for ' + role.name)", 'color': '(65346)'}), "(title='Enabled ' + command + ' on server for ' + role.name,\n color=65346)\n", (7650, 7727), False, 'import discord\n'), ((9660, 9774), 'discord.Embed', 'discord.Embed', ([], {'title': "('Enabled ' + command + ' in category ' + category.name + ' for ' + role.name)", 'color': '(65346)'}), "(title='Enabled ' + command + ' in category ' + category.name +\n ' for ' + role.name, color=65346)\n", (9673, 9774), False, 'import discord\n'), ((11687, 11799), 'discord.Embed', 'discord.Embed', ([], {'title': "('Enabled ' + command + ' in channel ' + channel.name + ' for ' + role.name)", 'color': '(65346)'}), "(title='Enabled ' + command + ' in channel ' + channel.name +\n ' for ' + role.name, color=65346)\n", (11700, 11799), False, 'import discord\n'), ((12788, 12849), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Reset command permissions"""', 'color': '(65346)'}), 
"(title='Reset command permissions', color=65346)\n", (12801, 12849), False, 'import discord\n'), ((13854, 13885), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (13857, 13885), False, 'from discord.utils import get\n'), ((14246, 14277), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (14249, 14277), False, 'from discord.utils import get\n'), ((786, 848), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (799, 848), False, 'import discord\n'), ((1585, 1651), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Command is already disabled"""', 'color': '(16593715)'}), "(title='Command is already disabled', color=16593715)\n", (1598, 1651), False, 'import discord\n'), ((2443, 2505), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (2456, 2505), False, 'import discord\n'), ((4526, 4588), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (4539, 4588), False, 'import discord\n'), ((6514, 6576), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (6527, 6576), False, 'import discord\n'), ((7295, 7360), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Command is already enabled"""', 'color': '(16593715)'}), "(title='Command is already enabled', color=16593715)\n", (7308, 7360), False, 'import discord\n'), ((8169, 8231), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (8182, 8231), False, 'import discord\n'), ((10210, 10272), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (10223, 10272), False, 'import discord\n'), ((12172, 12234), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Provide a valid command"""', 'color': '(16593715)'}), "(title='Provide a valid command', color=16593715)\n", (12185, 12234), False, 'import discord\n'), ((13374, 13461), 'discord_components.Select', 'Select', ([], {'placeholder': '"""Select something!"""', 'options': 'options', 'custom_id': '"""commandperms"""'}), "(placeholder='Select something!', options=options, custom_id=\n 'commandperms')\n", (13380, 13461), False, 'from discord_components import Button, Select, SelectOption, ComponentsBot\n'), ((15035, 15066), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (15038, 15066), False, 'from discord.utils import get\n'), ((15852, 15883), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (15855, 15883), False, 'from discord.utils import get\n'), ((16611, 16642), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (16614, 16642), False, 'from discord.utils import get\n'), ((17406, 17437), 'discord.utils.get', 'get', (['ctx.guild.roles'], {'id': 'roleid'}), '(ctx.guild.roles, id=roleid)\n', (17409, 17437), False, 'from discord.utils import get\n'), ((3384, 3450), 'discord.Embed', 
'discord.Embed', ([], {'title': '"""Command is already disabled"""', 'color': '(16593715)'}), "(title='Command is already disabled', color=16593715)\n", (3397, 3450), False, 'import discord\n'), ((5444, 5510), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Command is already disabled"""', 'color': '(16593715)'}), "(title='Command is already disabled', color=16593715)\n", (5457, 5510), False, 'import discord\n'), ((9066, 9132), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Command is already disabled"""', 'color': '(16593715)'}), "(title='Command is already disabled', color=16593715)\n", (9079, 9132), False, 'import discord\n'), ((11101, 11167), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Command is already disabled"""', 'color': '(16593715)'}), "(title='Command is already disabled', color=16593715)\n", (11114, 11167), False, 'import discord\n'), ((17738, 17825), 'discord_components.Select', 'Select', ([], {'placeholder': '"""Select something!"""', 'options': 'options', 'custom_id': '"""commandperms"""'}), "(placeholder='Select something!', options=options, custom_id=\n 'commandperms')\n", (17744, 17825), False, 'from discord_components import Button, Select, SelectOption, ComponentsBot\n')]
|
import unittest
import mock
from src.api.resources import log
from tests import LOGS_PATH
class TestLogListResource(unittest.TestCase):
def setUp(self):
self.class_ = log.LogListResource()
    def test_post_with_file_that_exists(self):
class FakeRequest:
@staticmethod
def get_json():
return {"logs-file": LOGS_PATH}
with mock.patch("src.api.resources.log.flask.request", FakeRequest):
result = self.class_.post()
self.assertEqual(3, len(result.json))
self.assertEqual("200 OK", result.status)
    def test_post_with_file_that_does_not_exist(self):
class FakeRequest:
@staticmethod
def get_json():
return {"logs-file": "/foo/bar"}
with self.assertRaises(FileNotFoundError) as cm:
with mock.patch("src.api.resources.log.flask.request", FakeRequest):
self.class_.post()
the_exception = cm.exception
self.assertIsInstance(the_exception, FileNotFoundError)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"src.api.resources.log.LogListResource",
"mock.patch"
] |
[((1095, 1110), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1108, 1110), False, 'import unittest\n'), ((182, 203), 'src.api.resources.log.LogListResource', 'log.LogListResource', ([], {}), '()\n', (201, 203), False, 'from src.api.resources import log\n'), ((394, 456), 'mock.patch', 'mock.patch', (['"""src.api.resources.log.flask.request"""', 'FakeRequest'], {}), "('src.api.resources.log.flask.request', FakeRequest)\n", (404, 456), False, 'import mock\n'), ((862, 924), 'mock.patch', 'mock.patch', (['"""src.api.resources.log.flask.request"""', 'FakeRequest'], {}), "('src.api.resources.log.flask.request', FakeRequest)\n", (872, 924), False, 'import mock\n')]
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
class MessageLoaderThread(threading.Thread):
"""
Waits for a new playhead position on the given topic, then loads the message at that position and notifies the view threads.
One thread per topic. Maintains a cache of recently loaded messages.
"""
def __init__(self, timeline, topic):
threading.Thread.__init__(self)
self.timeline = timeline
self.topic = topic
self.topic_playhead_position = None
self._message_cache_capacity = 50
self._message_cache = {}
self._message_cache_keys = []
self._stop_flag = False
self.setDaemon(True)
self.start()
def reset(self):
self.bag_playhead_position = None
def run(self):
while not self._stop_flag:
# Wait for a new entry
cv = self.timeline._playhead_positions_cvs[self.topic]
with cv:
while (self.topic not in self.timeline._playhead_positions) or (self.topic_playhead_position == self.timeline._playhead_positions[self.topic]):
cv.wait()
if self._stop_flag:
return
playhead_position = self.timeline._playhead_positions[self.topic]
self.topic_playhead_position = playhead_position
# Don't bother loading the message if there are no listeners
if not self.timeline.has_listeners(self.topic):
continue
# Load the message
if playhead_position is None:
msg_data = None
else:
msg_data = self._get_message(playhead_position)
# Inform the views
messages_cv = self.timeline._messages_cvs[self.topic]
with messages_cv:
self.timeline._messages[self.topic] = msg_data
messages_cv.notify_all() # notify all views that a message is loaded
def _get_message(self, position):
key = str(position)
if key in self._message_cache:
return self._message_cache[key]
msg_data = self.timeline.read_message(self.topic, position)
self._message_cache[key] = msg_data
self._message_cache_keys.append(key)
if len(self._message_cache) > self._message_cache_capacity:
oldest_key = self._message_cache_keys[0]
del self._message_cache[oldest_key]
self._message_cache_keys.remove(oldest_key)
return msg_data
def stop(self):
self._stop_flag = True
cv = self.timeline._playhead_positions_cvs[self.topic]
with cv:
print("DJS: self.timeline._playhead_positions_cvs[self.topic].notify_all() [MessageLoader:stop")
cv.notify_all()
|
[
"threading.Thread.__init__"
] |
[((1924, 1955), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1949, 1955), False, 'import threading\n')]
|
import json
import regex
import nltk.data
from nltk.tokenize import word_tokenize
import sys
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def tokenize(string):
return word_tokenize(string)
def split_paragraphs(text):
"""
    split the text into paragraphs, drop the title and 'Section::' markers, and sentence-tokenize the result
"""
splits = regex.split(r'\n+', text)
paras = []
for split in splits[1:]: # skip the titles
split = split.strip()
if len(split) == 0:
continue
if 'Section::' in split:
continue
paras.append(split)
paras = " ".join(paras)
return sent_detector.tokenize(paras)
def split_sent(sent):
strings = regex.split('<a |</a>', sent)
new_strings = []
count = 0
for s in strings:
s = s.strip()
if s:
if 'href=' in s:
s = s.lstrip('href="')
href, text = s.split('">')
new_strings.append((text, href))
count += 1
else:
ss = tokenize(s)
new_strings.extend([(_, None) for _ in ss])
return new_strings, count / len(new_strings), count
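# Hedged, self-contained example of split_sent; the sentence and href target below
# are made up for illustration only.
_demo, _ratio, _links = split_sent('He met <a href="Ada_Lovelace">Ada Lovelace</a> in 1833 .')
# _demo == [('He', None), ('met', None), ('Ada Lovelace', 'Ada_Lovelace'),
#           ('in', None), ('1833', None), ('.', None)]; _links == 1; _ratio == 1/6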
fw = open('out-more.json', 'w')
with open('en.json', 'r') as f:
for i, line in enumerate(f):
data = json.loads(line)
entry = {"id": data['id'], "url": data['url'], 'title': data['title']}
outputs = []
if len(data['text']) > 50:
try:
sents = split_paragraphs(data['text'])
for sent in sents:
if len(sent) < 400:
output, ratio, count = split_sent(sent)
if count > 1 and ratio >= 0.10 and len(output) >= 8 and output[0][0][0].isupper():
text = [_[0] for _ in output]
hyperlink = [_[1] for _ in output]
outputs.append((text, hyperlink))
except Exception:
pass
if len(outputs) > 0:
entry['text'] = outputs
fw.write(json.dumps(entry) + '\n')
sys.stdout.write('finished {}/{} \r'.format(i, 5989879))
fw.close()
|
[
"json.dumps",
"json.loads",
"regex.split",
"nltk.tokenize.word_tokenize"
] |
[((194, 215), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['string'], {}), '(string)\n', (207, 215), False, 'from nltk.tokenize import word_tokenize\n'), ((335, 360), 'regex.split', 'regex.split', (['"""\\\\n+"""', 'text'], {}), "('\\\\n+', text)\n", (346, 360), False, 'import regex\n'), ((695, 724), 'regex.split', 'regex.split', (['"""<a |</a>"""', 'sent'], {}), "('<a |</a>', sent)\n", (706, 724), False, 'import regex\n'), ((1302, 1318), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1312, 1318), False, 'import json\n'), ((2105, 2122), 'json.dumps', 'json.dumps', (['entry'], {}), '(entry)\n', (2115, 2122), False, 'import json\n')]
|
from helpers.concurrency import execute
from scaleapi import exceptions
def upsert(client, project_name, batches):
print("\n\nCreating Batches...")
print("===================")
def upsert_batch(desired_batch):
batch_name = desired_batch['name']
batch_callback_url = desired_batch['callback_url']
try:
current_batch = client.get_batch(desired_batch['name'])
            # Batch already exists - validate it is still in "staging" mode
if (not batches.get('batchStatusOverride', False) and current_batch.status != 'staging'):
raise(Exception(
f"❌ Trying to submit to a non-staging batch, '{desired_batch['name']}' is in status '{current_batch.status}' | Exiting now"))
return f"✅ Batch '{desired_batch['name']}' already exists, skipping"
except exceptions.ScaleResourceNotFound as err:
try:
new_batch = client.create_batch(
project_name, batch_name, batch_callback_url)
return f"✅ Successfully created batch `{desired_batch['name']}`"
except exceptions.ScaleException as err:
return f"❌ Batch creation for '{desired_batch['name']}' failed <Status Code {err.code}: {err.message}>"
except exceptions.ScaleException as err:
return f"❌ Batch fetch for '{desired_batch['name']}' failed <Status Code {err.code}: {err.message}>"
execute(upsert_batch, batches['batches'])
def finalize(client, batches):
print("\n\nFinalizing Batches...")
print("=====================")
def finalize_batch(batch):
batch_name = batch["name"]
# See if this batch was already finalized (finalizing again gives bad request)
try:
batch = client.get_batch(batch_name)
if (batch.status == 'in_progress'):
return f"✅ Batch '{batch_name}' was already finalized, skipping"
batch.finalize()
return f"✅ Succesfuly finalized batch '{batch_name}'"
except exceptions.ScaleException as err:
return f"❌ Attempt to finalize batch '{batch_name}' failed <Status Code {err.code}: {err.message}>"
execute(finalize_batch, batches['batches'])
|
[
"helpers.concurrency.execute"
] |
[((1458, 1499), 'helpers.concurrency.execute', 'execute', (['upsert_batch', "batches['batches']"], {}), "(upsert_batch, batches['batches'])\n", (1465, 1499), False, 'from helpers.concurrency import execute\n'), ((2217, 2260), 'helpers.concurrency.execute', 'execute', (['finalize_batch', "batches['batches']"], {}), "(finalize_batch, batches['batches'])\n", (2224, 2260), False, 'from helpers.concurrency import execute\n')]
|
from zzcore import StdAns, mysakuya
import requests
class Ans(StdAns):
def GETMSG(self):
msg=''
try:
msg += xs()
except:
msg += '可能是机器人笑死了!'
return msg
def xs():
url = "http://api-x.aya1.xyz:6/"
text = requests.get(url=url).text
return text
|
[
"requests.get"
] |
[((272, 293), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (284, 293), False, 'import requests\n')]
|
import copy
import datetime
import importlib
import logging
import operator
import re
from calendar import different_locale
import translations
from data_source.game_data import GameData
from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, \
UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES
from models.bookmark import Bookmark
from models.toplist import Toplist
from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day
LOGLEVEL = logging.DEBUG
formatter = logging.Formatter('%(asctime)-15s [%(levelname)s] %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(LOGLEVEL)
log = logging.getLogger(__name__)
log.setLevel(LOGLEVEL)
log.addHandler(handler)
t = translations.Translations()
_ = t.get
def update_translations():
global _
importlib.reload(translations)
del _
_ = translations.Translations().get
class TeamExpander:
def __init__(self):
world = GameData()
world.populate_world_data()
self.troops = world.troops
self.troop_types = world.troop_types
self.spells = world.spells
self.effects = world.effects
self.positive_effects = world.positive_effects
self.weapons = world.weapons
self.classes = world.classes
self.banners = world.banners
self.traits = world.traits
self.kingdoms = world.kingdoms
self.pet_effects = world.pet_effects
self.pets = world.pets
self.talent_trees = world.talent_trees
self.spoilers = world.spoilers
self.events = world.events
self.campaign_tasks = world.campaign_tasks
self.reroll_tasks = world.campaign_rerolls
self.soulforge = world.soulforge
self.traitstones = world.traitstones
self.levels = world.levels
self.rooms = {}
self.toplists = Toplist()
self.bookmarks = Bookmark()
self.adventure_board = world.adventure_board
self.drop_chances = world.drop_chances
self.event_kingdoms = world.event_kingdoms
self.weekly_event = world.weekly_event
self.active_gems = world.gem_events
@classmethod
def extract_code_from_message(cls, raw_code):
numbers = [int(n.strip()) for n in raw_code.split(',') if n]
return numbers
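    # Hedged example: extract_code_from_message("6251,6699,6528,6046") would return
    # [6251, 6699, 6528, 6046]; the ids are made up and only illustrate the
    # comma-separated format that get_team_from_code() decodes below.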
def get_team_from_code(self, code, lang):
result = {
'troops': [],
'banner': {},
'class': None,
'talents': [],
'class_title': _('[CLASS]', lang),
'troops_title': _('[TROOPS]', lang),
}
has_weapon = False
has_class = False
for i, element in enumerate(code):
troop = self.troops.get(element)
weapon = self.weapons.get(element)
if troop:
troop = troop.copy()
self.translate_troop(troop, lang)
result['troops'].append(troop)
continue
elif weapon:
weapon = weapon.copy()
self.translate_weapon(weapon, lang)
result['troops'].append(weapon)
has_weapon = True
continue
_class = self.classes.get(element)
if _class:
result['class'] = _(_class['name'], lang)
result['class_talents'] = _class['talents']
has_class = True
continue
banner = self.banners.get(element)
if banner:
result['banner'] = self.translate_banner(banner, lang)
continue
if 0 <= element <= 3:
result['talents'].append(element)
continue
if i <= 3:
result['troops'].append(self.troops['`?`'])
continue
elif i == 4:
banner = {
'colors': [('questionmark', 1)],
'name': '[REQUIREMENTS_NOT_MET]',
'filename': 'Locked',
'id': '`?`'
}
result['banner'] = self.translate_banner(banner, lang)
continue
elif i == 12:
result['class'] = _('[REQUIREMENTS_NOT_MET]', lang)
result['talents'] = []
has_class = True
continue
if has_weapon and has_class:
new_talents = []
for talent_no, talent_code in enumerate(result['talents']):
talent = '-'
if talent_code > 0:
talent = _(result['class_talents'][talent_code - 1][talent_no]['name'], lang)
new_talents.append(talent)
result['talents'] = new_talents
else:
result['class'] = None
result['talents'] = None
return result
def get_team_from_message(self, user_code, lang):
code = self.extract_code_from_message(user_code)
if not code:
return
return self.get_team_from_code(code, lang)
@staticmethod
def search_item(search_term, lang, items, lookup_keys, translator, sort_by='name'):
if search_term.isdigit() and int(search_term) in items:
item = items.get(int(search_term))
if item:
result = item.copy()
translator(result, lang)
return [result]
return []
possible_matches = []
for base_item in items.values():
if base_item['name'] == '`?`' or base_item['id'] == '`?`':
continue
item = base_item.copy()
translator(item, lang)
lookups = {
k: extract_search_tag(dig(item, k)) for k in lookup_keys
}
real_search = extract_search_tag(search_term)
if real_search == extract_search_tag(item['name']):
return [item]
for key, lookup in lookups.items():
if real_search in lookup:
possible_matches.append(item)
break
return sorted(possible_matches, key=operator.itemgetter(sort_by))
def search_troop(self, search_term, lang):
lookup_keys = [
'name',
'kingdom',
'type',
'roles',
'spell.description',
]
return self.search_item(search_term, lang,
items=self.troops,
lookup_keys=lookup_keys,
translator=self.translate_troop)
def translate_troop(self, troop, lang):
troop['name'] = _(troop['name'], lang)
if self.is_untranslated(troop['name']):
troop['name'] = troop['reference_name']
troop['description'] = _(troop['description'], lang).replace('widerbeleben',
'wiederbeleben')
troop['color_code'] = "".join(troop['colors'])
troop['rarity_title'] = _('[RARITY]', lang)
troop['raw_rarity'] = troop['rarity']
rarity_number = 1
if troop['rarity'] in TROOP_RARITIES:
rarity_number = TROOP_RARITIES.index(troop['rarity'])
troop['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
troop['traits_title'] = _('[TRAITS]', lang)
troop['traits'] = self.enrich_traits(troop['traits'], lang)
troop['roles_title'] = _('[TROOP_ROLE]', lang)
troop['roles'] = [_(f'[TROOP_ROLE_{role.upper()}]', lang) for role in troop['roles']]
troop['type_title'] = _('[FILTER_TROOPTYPE]', lang)
troop['raw_types'] = troop['types']
types = [
_(f'[TROOPTYPE_{_type.upper()}]', lang) for _type in troop['types']
]
troop['type'] = ' / '.join(types)
troop['kingdom_title'] = _('[KINGDOM]', lang)
reference_name = troop['kingdom'].get('reference_name', troop['kingdom']['name'])
troop['kingdom'] = _(troop['kingdom']['name'], lang)
if self.is_untranslated(troop['kingdom']):
troop['kingdom'] = reference_name
troop['spell'] = self.translate_spell(troop['spell_id'], lang)
troop['spell_title'] = _('[TROOPHELP_SPELL0]', lang)
self.translate_traitstones(troop, lang)
troop['bonuses_title'] = _('[BONUSES]', lang)
@staticmethod
def translate_traitstones(item, lang):
item['traitstones_title'] = _('[SOULFORGE_TAB_TRAITSTONES]', lang)
if 'traitstones' not in item:
item['traitstones'] = []
traitstones = []
for rune in item['traitstones']:
traitstones.append(f'{_(rune["name"], lang)} ({rune["amount"]})')
item['traitstones'] = traitstones
@staticmethod
def enrich_traits(traits, lang):
new_traits = []
for trait in traits:
new_trait = trait.copy()
new_trait['name'] = _(trait['name'], lang)
new_trait['description'] = _(trait['description'], lang)
new_traits.append(new_trait)
return new_traits
def search_kingdom(self, search_term, lang, include_warband=True):
lookup_keys = ['name']
return self.search_item(search_term, lang, items=self.kingdoms, lookup_keys=lookup_keys,
translator=self.translate_kingdom)
def kingdom_summary(self, lang):
kingdoms = [k.copy() for k in self.kingdoms.values() if k['location'] == 'krystara' and len(k['colors']) > 0]
for kingdom in kingdoms:
self.translate_kingdom(kingdom, lang)
return sorted(kingdoms, key=operator.itemgetter('name'))
def translate_kingdom(self, kingdom, lang):
kingdom['name'] = _(kingdom['name'], lang)
if self.is_untranslated(kingdom['name']):
kingdom['name'] = kingdom['reference_name']
kingdom['description'] = _(kingdom['description'], lang)
kingdom['punchline'] = _(kingdom['punchline'], lang)
kingdom['troop_title'] = _('[TROOPS]', lang)
kingdom['troops'] = []
for troop_id in kingdom['troop_ids']:
if troop_id not in self.troops:
continue
troop = self.troops[troop_id].copy()
self.translate_troop(troop, lang)
kingdom['troops'].append(troop)
kingdom['troops'] = sorted(kingdom['troops'], key=operator.itemgetter('name'))
kingdom['weapons_title'] = _('[WEAPONS:]', lang)
kingdom['weapons'] = sorted([
{'name': _(self.weapons[_id]['name'], lang),
'id': _id
} for _id in kingdom['weapon_ids']
], key=operator.itemgetter('name'))
kingdom['banner_title'] = _('[BANNERS]', lang)
kingdom['banner'] = self.translate_banner(self.banners[kingdom['id']], lang)
kingdom['linked_kingdom'] = None
if kingdom['linked_kingdom_id']:
kingdom['linked_kingdom'] = _(self.kingdoms[kingdom['linked_kingdom_id']]['name'], lang)
if kingdom['linked_kingdom'] and self.is_untranslated(kingdom['linked_kingdom']):
kingdom['linked_kingdom'] = None
kingdom['map'] = _('[MAPNAME_MAIN]', lang)
kingdom['linked_map'] = _('[MAPNAME_UNDERWORLD]', lang)
if kingdom['underworld']:
kingdom['map'] = _('[MAPNAME_UNDERWORLD]', lang)
kingdom['linked_map'] = _('[MAPNAME_MAIN]', lang)
if 'primary_color' in kingdom:
deed_num = COLORS.index(kingdom['primary_color'])
kingdom['deed'] = _(f'[DEED{deed_num:02d}]', lang)
kingdom['color_title'] = _('[GEM_MASTERY]', lang)
kingdom['stat_title'] = _('[STAT_BONUS]', lang)
if 'class_id' in kingdom:
kingdom['class_title'] = _('[CLASS]', lang)
kingdom['class'] = _(self.classes[kingdom['class_id']]['name'], lang)
if 'primary_stat' in kingdom:
kingdom['primary_stat'] = _(f'[{kingdom["primary_stat"].upper()}]', lang)
if 'pet' in kingdom:
kingdom['pet_title'] = _('[PET_RESCUE_PET]', lang)
kingdom['pet'] = kingdom['pet'].translations[lang]
if 'event_weapon' in kingdom:
kingdom['event_weapon_title'] = _('[FACTION_WEAPON]', lang)
kingdom['event_weapon_id'] = kingdom['event_weapon']['id']
event_weapon = kingdom['event_weapon'].copy()
self.translate_weapon(event_weapon, lang)
kingdom['event_weapon'] = event_weapon
kingdom['max_power_level_title'] = _('[KINGDOM_POWER_LEVELS]', lang)
def search_class(self, search_term, lang):
lookup_keys = ['name']
return self.search_item(search_term, lang,
items=self.classes,
translator=self.translate_class,
lookup_keys=lookup_keys)
def class_summary(self, lang):
classes = [c.copy() for c in self.classes.values()]
for c in classes:
self.translate_class(c, lang)
return sorted(classes, key=operator.itemgetter('name'))
def translate_class(self, _class, lang):
kingdom = self.kingdoms[_class['kingdom_id']]
_class['kingdom'] = _(kingdom['name'], lang, default=kingdom['reference_name'])
weapon = self.weapons[_class['weapon_id']]
_class['weapon'] = _(weapon['name'], lang)
_class['name'] = _(_class['name'], lang)
translated_trees = []
for tree in _class['talents']:
translated_talents = []
for talent in tree:
translated_talents.append({
'name': _(talent['name'], lang),
'description': _(talent['description'], lang)
})
translated_trees.append(translated_talents)
self.translate_traitstones(_class, lang)
_class['talents_title'] = _('[TALENT_TREES]', lang)
_class['kingdom_title'] = _('[KINGDOM]', lang)
_class['traits_title'] = _('[TRAITS]', lang)
_class['traits'] = self.enrich_traits(_class['traits'], lang)
_class['weapon_title'] = _('[WEAPON]', lang)
_class['talents'] = translated_trees
_class['trees'] = [_(f'[TALENT_TREE_{t.upper()}]', lang) for t in _class['trees']]
_class['type_short'] = _(f'[TROOPTYPE_{_class["type"].upper()}]', lang)
_class['type'] = _(f'[PERK_TYPE_{_class["type"].upper()}]', lang)
_class['weapon_bonus'] = _('[MAGIC_BONUS]', lang) + " " + _(
f'[MAGIC_BONUS_{COLORS.index(_class["weapon_color"])}]', lang)
def search_talent(self, search_term, lang):
possible_matches = []
for tree in self.talent_trees.values():
translated_name = extract_search_tag(_(tree['name'], lang))
translated_talents = [_(t['name'], lang) for t in tree['talents']]
talents_search_tags = [extract_search_tag(t) for t in translated_talents]
real_search = extract_search_tag(search_term)
if real_search == translated_name or real_search in talents_search_tags:
result = tree.copy()
self.translate_talent_tree(result, lang)
return [result]
elif real_search in translated_name:
result = tree.copy()
self.translate_talent_tree(result, lang)
possible_matches.append(result)
else:
talent_matches = [t for t in talents_search_tags if real_search in t]
if talent_matches:
result = tree.copy()
result['talent_matches'] = talent_matches
self.translate_talent_tree(result, lang)
possible_matches.append(result)
return sorted(possible_matches, key=operator.itemgetter('name'))
@staticmethod
def translate_talent_tree(tree, lang):
tree['talents_title'] = _('[TALENT_TREES]', lang)
tree['name'] = _(tree['name'], lang)
translated_talents = []
for talent in tree['talents']:
translated_talents.append({
'name': _(talent['name'], lang),
'description': _(talent['description'], lang)
})
tree['talents'] = translated_talents
tree['classes'] = [
{'id': c['id'],
'name': _(c['name'], lang)
}
for c in tree['classes']
]
def get_troops_with_trait(self, trait, lang):
return self.get_objects_by_trait(trait, self.troops, self.translate_troop, lang)
def get_classes_with_trait(self, trait, lang):
return self.get_objects_by_trait(trait, self.classes, self.translate_class, lang)
@staticmethod
def get_objects_by_trait(trait, objects, translator, lang):
result = []
for o in objects.values():
trait_codes = [t['code'] for t in o['traits']] if 'traits' in o else []
if trait['code'] in trait_codes:
translated_object = o.copy()
translator(translated_object, lang)
result.append(translated_object)
return result
def search_trait(self, search_term, lang):
possible_matches = []
for code, trait in self.traits.items():
translated_name = extract_search_tag(_(trait['name'], lang))
translated_description = extract_search_tag(_(trait['description'], lang))
real_search = extract_search_tag(search_term)
if real_search == translated_name:
result = trait.copy()
result['troops'] = self.get_troops_with_trait(trait, lang)
result['troops_title'] = _('[TROOPS]', lang)
result['classes'] = self.get_classes_with_trait(trait, lang)
result['classes_title'] = _('[CLASS]', lang)
if result['troops'] or result['classes']:
return self.enrich_traits([result], lang)
elif real_search in translated_name or real_search in translated_description:
result = trait.copy()
result['troops'] = self.get_troops_with_trait(trait, lang)
result['troops_title'] = _('[TROOPS]', lang)
result['classes'] = self.get_classes_with_trait(trait, lang)
result['classes_title'] = _('[CLASS]', lang)
if result['troops'] or result['classes']:
possible_matches.append(result)
return sorted(self.enrich_traits(possible_matches, lang), key=operator.itemgetter('name'))
def search_pet(self, search_term, lang):
return self.pets.search(search_term, lang)
def search_weapon(self, search_term, lang):
lookup_keys = [
'name',
'type',
'roles',
'spell.description',
]
return self.search_item(search_term, lang,
items=self.weapons,
lookup_keys=lookup_keys,
translator=self.translate_weapon)
def translate_weapon(self, weapon, lang):
weapon['name'] = _(weapon['name'], lang)
weapon['description'] = _(weapon['description'], lang)
weapon['color_code'] = "".join(sorted(weapon['colors']))
weapon['spell_title'] = _('[TROOPHELP_SPELL0]', lang)
weapon['rarity_title'] = _('[RARITY]', lang)
weapon['raw_rarity'] = weapon['rarity']
rarity_number = WEAPON_RARITIES.index(weapon['rarity'])
weapon['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
weapon['spell'] = self.translate_spell(weapon['spell_id'], lang)
weapon['upgrade_title'] = _('[UPGRADE_WEAPON]', lang)
bonus_title = _('[BONUS]', lang)
upgrade_numbers = zip(weapon['armor_increase'], weapon['attack_increase'], weapon['health_increase'],
weapon['magic_increase'])
upgrade_titles = (
_('[ARMOR]', lang),
_('[ATTACK]', lang),
_('[LIFE]', lang),
_('[MAGIC]', lang),
)
upgrades = []
for upgrade in upgrade_numbers:
for i, amount in enumerate(upgrade):
if amount:
upgrades.append(
{'name': f'{upgrade_titles[i]} {bonus_title}',
'description': f'+{amount} {upgrade_titles[i]}'})
weapon['upgrades'] = upgrades + [self.translate_spell(spell['id'], lang) for spell in weapon['affixes']]
weapon['kingdom_title'] = _('[KINGDOM]', lang)
weapon['kingdom_id'] = weapon['kingdom']['id']
weapon['kingdom'] = _(weapon['kingdom']['name'], lang)
weapon['roles_title'] = _('[WEAPON_ROLE]', lang)
weapon['roles'] = [_(f'[TROOP_ROLE_{role.upper()}]', lang) for role in weapon['roles']]
weapon['type_title'] = _('[FILTER_WEAPONTYPE]', lang)
weapon['type'] = _(f'[WEAPONTYPE_{weapon["type"].upper()}]', lang)
weapon['has_mastery_requirement_color'] = False
if weapon['requirement'] < 1000:
weapon['requirement_text'] = _('[WEAPON_MASTERY_REQUIRED]', lang) + \
str(weapon['requirement'])
weapon['has_mastery_requirement_color'] = True
elif weapon['requirement'] == 1000:
weapon['requirement_text'] = _('[WEAPON_AVAILABLE_FROM_CHESTS_AND_EVENTS]', lang)
elif weapon['requirement'] == 1002:
_class = _(weapon.get('class', '[NO_CLASS]'), lang)
weapon['requirement_text'] = _('[CLASS_REWARD_TITLE]', lang) + f' ({_class})'
elif weapon['requirement'] == 1003:
weapon['requirement_text'] = _('[SOULFORGE_WEAPONS_TAB_EMPTY_ERROR]', lang)
if weapon.get('event_faction'):
weapon['requirement_text'] += ' (' + _(f'[{weapon["event_faction"]}_NAME]', lang) + ' ' + _(
'[FACTION_WEAPON]', lang) + ')'
def search_affix(self, search_term, lang):
real_search = extract_search_tag(search_term)
results = {}
for weapon in self.weapons.values():
my_weapon = weapon.copy()
self.translate_weapon(my_weapon, lang)
affixes = [affix for affix in my_weapon['upgrades'] if 'cost' in affix]
for affix in affixes:
search_name = extract_search_tag(affix['name'])
search_desc = extract_search_tag(affix['description'])
if real_search == search_name \
or real_search == search_desc \
or real_search in search_name \
or real_search in search_desc:
if affix['name'] in results:
results[affix['name']]['weapons'].append(my_weapon)
results[affix['name']]['num_weapons'] += 1
else:
results[affix['name']] = affix.copy()
results[affix['name']]['weapons_title'] = _('[SOULFORGE_TAB_WEAPONS]', lang)
results[affix['name']]['weapons'] = [my_weapon]
results[affix['name']]['num_weapons'] = 1
for name, affix in results.items():
if real_search == extract_search_tag(name):
return [affix]
return sorted(results.values(), key=operator.itemgetter('name'))
def search_traitstone(self, search_term, lang):
return self.search_item(search_term, lang,
items=self.traitstones,
lookup_keys=['name'],
translator=self.translate_traitstone)
def translate_traitstone(self, traitstone, lang):
troops = []
for troop_id in traitstone['troop_ids']:
amount = sum([t['amount'] for t in self.troops[troop_id]['traitstones'] if t['id'] == traitstone['id']])
troops.append([_(self.troops[troop_id]['name'], lang), amount])
traitstone['troops'] = sorted(troops, key=operator.itemgetter(1), reverse=True)
classes = []
for class_id in traitstone['class_ids']:
amount = sum([t['amount'] for t in self.classes[class_id]['traitstones'] if t['id'] == traitstone['id']])
classes.append([_(self.classes[class_id]['name'], lang), amount])
traitstone['classes'] = classes
kingdoms = []
for kingdom_id in traitstone['kingdom_ids']:
kingdoms.append(_(self.kingdoms[int(kingdom_id)]['name'], lang))
if not traitstone['kingdom_ids']:
kingdoms.append(_('[ALL_KINGDOMS]', lang))
traitstone['kingdoms'] = kingdoms
traitstone['name'] = _(traitstone['name'], lang)
traitstone['troops_title'] = _('[TROOPS]', lang)
traitstone['classes_title'] = _('[CLASS]', lang)
traitstone['kingdoms_title'] = _('[KINGDOMS]', lang)
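    # Spell descriptions contain numbered placeholders ({1}, {2}, ...).  Each one is
    # replaced with a '[multiplier x Magic / divisor + amount]' snippet built from
    # the spell's effect list; when the description has one more placeholder than
    # there are effects, {1} is rendered at half strength.  A trailing '[xN]' or
    # '[N:1]' marker encodes the boost ratio.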
def translate_spell(self, spell_id, lang):
spell = self.spells[spell_id]
magic = _('[MAGIC]', lang)
description = _(spell['description'], lang)
for i, (multiplier, amount) in enumerate(spell['effects'], start=1):
spell_amount = f' + {amount}' if amount else ''
multiplier_text = ''
if multiplier > 1:
if multiplier == int(multiplier):
multiplier_text = f'{multiplier:.0f} ⨯ '
else:
multiplier_text = f'{multiplier} ⨯ '
divisor = ''
if multiplier < 1:
number = int(round(1 / multiplier))
divisor = f' / {number}'
damage = f'[{multiplier_text}{magic}{divisor}{spell_amount}]'
number_of_replacements = len(re.findall(r'\{\d\}', description))
has_half_replacement = len(spell['effects']) == number_of_replacements - 1
if '{2}' in description and has_half_replacement:
multiplier *= 0.5
amount *= 0.5
if amount == int(amount):
amount = int(amount)
half_damage = f'[{multiplier} ⨯ {magic}{divisor} + {amount}]'
description = description.replace('{1}', half_damage)
description = description.replace('{2}', damage)
else:
description = description.replace(f'{{{i}}}', damage)
boost = ''
if spell['boost'] and spell['boost'] > 100:
boost = f' [x{int(round(spell["boost"] / 100))}]'
elif spell['boost'] and spell['boost'] != 1 and spell['boost'] <= 100:
boost = f' [{100 / spell["boost"]:0.0f}:1]'
description = f'{description}{boost}'
return {
'name': _(spell['name'], lang),
'cost': spell['cost'],
'description': description,
}
def translate_banner(self, banner, lang):
result = {
'name': _(banner['name'], lang),
'kingdom': _(self.kingdoms[banner['id']]['name'], lang),
'colors': [(_(c[0], 'en').lower(), c[1]) for c in banner['colors'] if c[1]],
'filename': banner['filename'],
}
colors_shorthand = []
for color, amount in result['colors']:
if amount > 0:
colors_shorthand.append(color[0].upper())
else:
colors_shorthand.append(color[0].lower())
result['colors_shorthand'] = ''.join(colors_shorthand)
if not result['colors']:
result['available'] = _('[AVAILABLE_FROM_KINGDOM]', lang).replace('%1', _(f'[{banner["id"]}_NAME]', lang))
return result
def get_event_kingdoms(self, lang):
today = datetime.date.today()
start = today + datetime.timedelta(days=-today.weekday(), weeks=1)
result = self.guess_weekly_kingdom_from_troop_spoilers(lang)
for kingdom_id in self.event_kingdoms:
end = start + datetime.timedelta(days=7)
if kingdom_id != 0:
event_data = {
'start': start,
'end': end,
'kingdom': _(self.kingdoms[kingdom_id]['name'], lang,
default=self.kingdoms[kingdom_id]['reference_name']),
}
result[start] = event_data
start = end
return sorted(result.values(), key=operator.itemgetter('start'))
def guess_weekly_kingdom_from_troop_spoilers(self, lang):
result = {}
latest_date = datetime.datetime.utcnow()
for spoiler in self.spoilers:
if spoiler['type'] == 'troop' \
and spoiler['date'].weekday() == 0 \
and spoiler['date'] > latest_date:
troop = self.troops[spoiler['id']]
if troop['rarity'] == 'Mythic':
continue
kingdom = troop['kingdom']
if not kingdom.get('name') and not kingdom.get('reference_name'):
continue
result[spoiler['date'].date()] = {
'start': spoiler['date'].date(),
'end': spoiler['date'].date() + datetime.timedelta(days=7),
'kingdom': _(kingdom['name'], lang,
default=kingdom['reference_name']) + ' *',
}
latest_date = spoiler['date']
return result
def get_events(self, lang):
today = datetime.date.today()
events = [self.translate_event(e, lang) for e in self.events if today <= e['start']]
return events
def translate_event(self, event, lang):
entry = event.copy()
entry['extra_info'] = ''
if entry['type'] in ('[BOUNTY]', '[HIJACK]') and entry['gacha'] and entry['gacha'] in self.troops:
entry['extra_info'] = _(self.troops[entry['gacha']]['name'], lang)
elif entry['type'] == '[PETRESCUE]' and entry['gacha']:
entry['extra_info'] = self.pets[entry['gacha']][lang].name
elif entry['type'] == '[CLASS_EVENT]' and entry['gacha']:
entry['extra_info'] = _(self.classes[entry['gacha']]['name'], lang)
elif entry['type'] == '[TOWER_OF_DOOM]' and entry['gacha']:
entry['extra_info'] = _(self.troops[entry['gacha']]['name'], lang)
elif entry['type'] == '[DELVE_EVENT]':
entry['extra_info'] = _(self.kingdoms[entry['kingdom_id']]['name'], lang)
elif entry['type'] == '[HIJACK]' and entry['troops']:
entry['extra_info'] = ', '.join(_(self.troops[t]['name'], lang) for t in entry['troops'])
elif entry['type'] == '[INVASION]' and entry['gacha'] and entry['gacha'] in self.troops:
troop = self.troops[entry['gacha']]
troop_name = _(troop['name'], lang)
troop_types = [_(f'[TROOPTYPE_{t.upper()}]', lang) for t in troop['types']]
entry['extra_info'] = f'{troop_name} ({", ".join(troop_types)})'
elif entry['type'] in ('[WEEKLY_EVENT]', '[RARITY_5]') and entry['gacha'] and entry['gacha'] in self.troops:
troop = self.troops[entry['gacha']]
troop_name = _(troop['name'], lang)
kingdom = _(self.kingdoms[entry['kingdom_id']]['name'], lang)
entry['extra_info'] = f'{troop_name} ({kingdom})'
entry['kingdom'] = kingdom
locale = translations.LANGUAGE_CODE_MAPPING.get(lang, lang)
locale = translations.LOCALE_MAPPING.get(locale, 'en_GB') + '.UTF8'
with different_locale(locale):
entry['formatted_start'] = entry['start'].strftime('%b %d')
entry['formatted_end'] = entry['end'].strftime('%b %d')
entry['raw_type'] = entry['type']
entry['type'] = _(entry['type'], lang)
return entry
def get_campaign_tasks(self, lang, _filter=None):
result = {'heading': f'{_("[CAMPAIGN]", lang)}: {_("[TASKS]", lang)}'}
tiers = ['bronze', 'silver', 'gold']
result['campaigns'] = {
f'[MEDAL_LEVEL_{i}]': [self.translate_campaign_task(t, lang) for t in self.campaign_tasks[tier]]
for i, tier in reversed(list(enumerate(tiers))) if _filter is None or tier.lower() == _filter.lower()
}
formatted_start, start_date = get_next_monday_in_locale(date=None, lang=lang)
result['has_content'] = any([len(c) > 0 for c in result['campaigns'].values()])
result['background'] = f'Background/{self.campaign_tasks["kingdom"]["filename"]}_full.png'
result['gow_logo'] = 'Atlas/gow_logo.png'
kingdom_filebase = self.campaign_tasks['kingdom']['filename']
result['kingdom_logo'] = f'Troopcardshields_{kingdom_filebase}_full.png'
result['kingdom'] = _(self.campaign_tasks['kingdom']['name'], lang)
result['raw_date'] = start_date
result['date'] = formatted_start
result['lang'] = lang
result['texts'] = {
'campaign': _('[CAMPAIGN]', lang),
'team': _('[LITE_CHAT_TEAM_START]', lang),
}
return result
def get_reroll_tasks(self, lang, _filter=None):
tiers = ['bronze', 'silver', 'gold']
tasks = {
f'[MEDAL_LEVEL_{i}]': [self.translate_campaign_task(t, lang) for t in self.reroll_tasks[tier]]
for i, tier in reversed(list(enumerate(tiers))) if _filter is None or tier.lower() == _filter.lower()
}
return tasks
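    # Campaign task names/titles contain template tokens such as '{Kingdom}' or
    # '{Color}'.  Each token is mapped to a game translation key, formatted with the
    # task's own fields, translated, and substituted back in; for kill tasks a hint
    # about where the task can be completed (kingdom, PvP, weekly event) is appended.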
def translate_campaign_task(self, task, lang):
new_task = task.copy()
color_code = int(new_task['value1']) if new_task['value1'].isdigit() else 666
color = COLORS[color_code].upper() if color_code < len(COLORS) else '`?`'
if isinstance(new_task.get('y'), str):
new_task['y'] = _(f'[{new_task["y"].upper()}]', lang)
new_task['plural'] = int(new_task.get('x', 1)) != 1
replacements = {
'{WeaponType}': '[WEAPONTYPE_{c:u}]',
'{Kingdom}': '[{d:u}_NAME]',
'{Banner}': '[{c:u}_BANNERNAME]',
'{Class}': '[HEROCLASS_{c:l}_NAME]',
'{Color}': f'[GEM_{color}]',
'{TroopType}': '[TROOPTYPE_{value1:u}]',
'{Troop}': '{{[{value1}][name]}}',
'{Value0}': task['value0'],
'{Value1}': task['value1'],
'{0}': '{x}',
'{1}': task['c'],
'{2}': '{x} {y}',
}
new_task['title'] = _(new_task['title'], lang, plural=new_task['plural'])
new_task['name'] = _(new_task["name"], lang, plural=new_task['plural'])
if '{0}' not in new_task['name'] and '{2}' not in new_task['name']:
new_task['name'] = f'{task["x"]}x ' + new_task['name']
for before, after in replacements.items():
if before in new_task['title'] or before in new_task['name']:
translated = _(after.format(**new_task).format(self.troops), lang, plural=new_task['plural'])
if '`?`' in translated:
translated = '`?`'
new_task['title'] = new_task['title'].replace(before, translated)
new_task['name'] = new_task['name'].replace(before, translated)
where = ''
if new_task['value1'] == '`?`':
pass
elif task['name'] == '[TASK_KILL_TROOP_COLOR]':
color_kingdoms = self.get_color_kingdoms(lang)
target_kingdom = color_kingdoms[color.lower()]['name']
where = f' --> {target_kingdom}'
elif task['name'] == '[TASK_KILL_TROOP_ID]':
target_kingdom = _(self.troops[int(task['value1'])]['kingdom']['name'], lang)
pvp = _('[PVP]', lang)
weekly_event = _('[WEEKLY_EVENT]', lang)
where = f' --> {target_kingdom} / {pvp} / {weekly_event}'
elif task['name'] == '[TASK_KILL_TROOP_TYPE]':
troop_type_kingdoms = dict(self.get_type_kingdoms(lang))
troop_type = _(f'[TROOPTYPE_{task["value1"].upper()}]', lang)
target_kingdom = troop_type_kingdoms[troop_type]['name']
where = f' --> {target_kingdom}'
new_task['name'] += where
return new_task
def get_spoilers(self, lang):
spoilers = []
now = datetime.datetime.utcnow()
near_term_spoilers = [s for s in self.spoilers if now <= s['date'] <= now + datetime.timedelta(days=180)]
for spoiler in near_term_spoilers:
translated = self.translate_spoiler(spoiler, lang)
if translated:
spoilers.append(translated)
return spoilers
def translate_spoiler(self, spoiler, lang):
# FIXME this is transitional until all new models are in place.
if spoiler['type'] in ['pet']:
item = getattr(self, spoiler['type'] + 's').get(spoiler['id'])
if not item:
return
entry = item[translations.LANGUAGE_CODE_MAPPING.get(lang, lang)].data.copy()
else:
entry = getattr(self, spoiler['type'] + 's').get(spoiler['id'], {}).copy()
if not entry:
return None
entry['name'] = _(entry['name'], lang)
if self.is_untranslated(entry['name']):
entry['name'] = entry.get('reference_name', entry['name'])
entry['type'] = spoiler['type']
entry['date'] = spoiler['date'].date()
entry['event'] = _('[GLOG_EVENT]', lang) + ': ' if entry.get('event') else ''
if 'rarity' in entry:
entry['rarity_title'] = _('[RARITY]', lang)
if entry['rarity'] in TROOP_RARITIES:
rarity_number = TROOP_RARITIES.index(entry['rarity'])
entry['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
kingdom_id = entry.get('kingdom_id')
if kingdom_id:
kingdom = self.kingdoms[kingdom_id]
entry['kingdom'] = _(kingdom['name'], lang)
if self.is_untranslated(entry['kingdom']):
entry['kingdom'] = kingdom['reference_name']
return entry
def get_soulforge(self, lang):
title = _('[SOULFORGE]', lang)
craftable_items = {}
for category, recipes in self.soulforge.items():
recipe_type = _(category, lang)
craftable_items[recipe_type] = [self.translate_recipe(r, lang) for r in recipes]
return title, craftable_items
@staticmethod
def translate_recipe(recipe, lang):
new_recipe = recipe.copy()
new_recipe['name'] = _(recipe['name'], lang)
rarity_number = WEAPON_RARITIES.index(new_recipe['rarity'])
new_recipe['rarity_number'] = rarity_number
new_recipe['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
return new_recipe
@staticmethod
def translate_categories(categories, lang):
def try_different_translated_versions_because_devs_are_stupid(cat):
lookup = f'[{cat.upper()}S]'
result = _(lookup, lang)
if result == lookup:
lookup = f'[{cat.upper()}S:]'
result = _(lookup, lang)[:-1]
if result == lookup[:-1]:
result = _(f'[{cat.upper()}]', lang)
return result
translated = [try_different_translated_versions_because_devs_are_stupid(c) for c in categories]
return dict(zip(categories, translated))
def get_levels(self, lang):
levels = [{
'level': level['level'],
'bonus': _(level['bonus'], lang),
} for level in self.levels]
return levels
def translate_toplist(self, toplist_id, lang):
toplist = self.toplists.get(toplist_id)
if not toplist:
return None
result = toplist.copy()
result['items'] = []
for item_search in toplist['items']:
items = self.search_troop(item_search, lang)
if not items:
items = self.search_weapon(item_search, lang)
if not items:
continue
result['items'].append(items[0])
return result
async def create_toplist(self, message, description, items, lang, update_id):
toplist_id = await self.toplists.add(message.author.id, message.author.display_name, description, items,
update_id)
toplist = self.translate_toplist(toplist_id, lang)
return toplist
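    # For every value in filter_values, find the Krystara kingdom whose released
    # troops have the highest share matching that value (e.g. a colour or troop
    # type).  Kingdoms with no released troops and a few hidden ids are skipped.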
def kingdom_percentage(self, filter_name, filter_values, lang):
result = {}
now = datetime.datetime.utcnow()
hidden_kingdoms = [3032, 3033, 3034, 3038]
for filter_ in filter_values:
kingdoms = []
for kingdom in self.kingdoms.values():
if kingdom['location'] != 'krystara':
continue
if kingdom['id'] in hidden_kingdoms:
continue
all_troops = [self.troops.get(t) for t in kingdom['troop_ids']]
available_troops = [t for t in all_troops if t and t.get('release_date', now) <= now]
if not available_troops:
continue
fitting_troops = [t for t in available_troops if filter_ in t[filter_name]]
kingdoms.append({
'name': _(kingdom['name'], lang),
'total': len(available_troops),
'fitting_troops': len(fitting_troops),
'percentage': len(fitting_troops) / len(available_troops),
})
top_kingdom = sorted(kingdoms, key=operator.itemgetter('percentage'), reverse=True)[0]
result[filter_] = top_kingdom
return result
def get_color_kingdoms(self, lang):
colors_without_skulls = COLORS[:6]
return self.kingdom_percentage('colors', colors_without_skulls, lang)
def get_type_kingdoms(self, lang):
forbidden_types = {'None', 'Boss', 'Tower', 'Castle', 'Doom', 'Gnome'}
troop_types = self.troop_types - forbidden_types
result = self.kingdom_percentage('types', troop_types, lang)
translated_result = {
_(f"[TROOPTYPE_{troop_type.upper()}]", lang): kingdom
for troop_type, kingdom in result.items()
}
return sorted(translated_result.items(), key=operator.itemgetter(0))
def get_adventure_board(self, lang):
result = []
for adventure in self.adventure_board:
result.append(self.translate_adventure(adventure, lang))
return result
@staticmethod
def translate_adventure(adventure, lang):
def change_form(key, value):
if value == 1 and key.startswith('[KEYTYPE'):
key = key.replace('_TITLE', '_SINGLE')
return _(key, lang).replace('%1 ', ''), value
result = adventure.copy()
result['name'] = _(result['name'], lang)
result['reward_types'] = set(result['rewards'].keys())
result['rewards'] = dict([change_form(key, value) for key, value in result['rewards'].items()])
result['rarity'] = _(result['rarity'], lang)
return result
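    # A lookup counts as untranslated when the raw key (e.g. '[SOME_KEY]') comes
    # back unchanged, i.e. the first and last characters still form '[]'.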
@staticmethod
def is_untranslated(param):
if not param:
return True
return param[0] + param[-1] == '[]'
def get_toplist_troop_ids(self, items, lang):
result = []
for search_term in items.split(','):
items = self.search_troop(search_term, lang)
if not items:
items = self.search_weapon(search_term, lang)
if items:
result.append(str(items[0]['id']))
return result
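    # Builds the data used to render a Soulforge weapon image: per-colour jewel
    # requirements (with the weekday they are available and the kingdoms that
    # provide them), affixes, summed stat increases, kingdom/faction logos and
    # translated labels.  Returns None unless the search resolves to exactly one
    # weapon.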
def get_soulforge_weapon_image_data(self, search_term, date, switch, lang):
search_result = self.search_weapon(search_term, lang)
if len(search_result) != 1:
return
weapon = search_result[0].copy()
requirements = SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
alternate_kingdom_id = weapon.get('event_faction')
if alternate_kingdom_id:
requirements = UNDERWORLD_SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
jewels = []
for color in weapon['colors']:
color_code = COLORS.index(color)
filename = f'Runes_Jewel{color_code:02n}_full.png'
jewels.append({
'filename': filename,
'amount': requirements['jewels'],
'available_on': translate_day(color_code, lang),
'kingdoms': sorted([_(kingdom['name'], lang) for kingdom in self.kingdoms.values()
if 'primary_color' in kingdom
and color == kingdom['primary_color']
and kingdom['location'] == 'krystara']),
})
requirements['jewels'] = jewels
kingdom = self.kingdoms[weapon['kingdom_id']]
alternate_kingdom = None
alternate_kingdom_name = None
alternate_kingdom_filename = None
if alternate_kingdom_id:
alternate_kingdom = self.kingdoms[alternate_kingdom_id]
alternate_kingdom_name = _(alternate_kingdom['name'], lang)
alternate_kingdom_filename = alternate_kingdom['filename']
affixes = [{
'name': _(affix['name'], lang),
'description': _(affix['description'], lang),
'color': list(RARITY_COLORS.values())[i],
} for i, affix in enumerate(weapon['affixes'], start=1)]
mana_colors = ''.join([c.title() for c in weapon['colors']]).replace('Brown', 'Orange')
kingdom_filebase = self.kingdoms[weapon['kingdom_id']]['filename']
in_soulforge_text = _('[WEAPON_AVAILABLE_FROM_SOULFORGE]', lang)
if alternate_kingdom_id:
in_soulforge_text += ' (' + _(f'[{weapon["event_faction"]}_NAME]', lang) + ' ' + _(
'[FACTION_WEAPON]', lang) + ')'
date = get_next_monday_in_locale(date, lang)[0]
result = {
'switch': switch,
'name': weapon['name'],
'rarity_color': RARITY_COLORS[weapon['raw_rarity']],
'rarity': weapon['rarity'],
'filename': f'Spells/Cards_{weapon["spell_id"]}_full.png',
'description': weapon['spell']['description'],
'kingdom': weapon['kingdom'],
'alternate_kingdom': alternate_kingdom_name,
'kingdom_logo': f'Troopcardshields_{kingdom_filebase}_full.png',
'alternate_kingdom_logo': f'Troopcardshields_{alternate_kingdom_filename}_full.png',
'type': _(weapon['type'], lang),
'background': f'Background/{kingdom["filename"]}_full.png',
'gow_logo': 'Atlas/gow_logo.png',
'requirements': requirements,
'affixes': affixes,
'affix_icon': 'Atlas/affix.png',
'gold_medal': 'Atlas/medal_gold.png',
'mana_color': f'Troopcardall_{mana_colors}_full.png',
'mana_cost': weapon['spell']['cost'],
'stat_increases': {'attack': sum(weapon['attack_increase']),
'health': sum(weapon['health_increase']),
'armor': sum(weapon['armor_increase']),
'magic': sum(weapon['magic_increase'])},
'stat_icon': 'Atlas/{stat}.png',
'texts': {
'from_battles': _('[PET_LOOT_BONUS]', lang).replace('+%1% %2 ', '').replace('+%1 %2 ', ''),
'gem_bounty': _('[DUNGEON_OFFER_NAME]', lang),
'kingdom_challenges': f'{_("[KINGDOM]", lang)} {_("[CHALLENGES]", lang)}',
'soulforge': _('[SOULFORGE]', lang),
'resources': _('[RESOURCES]', lang),
'dungeon': _('[DUNGEON]', lang),
'dungeon_battles': _('[TASK_WIN_DUNGEON_BATTLES]', lang).replace('{0}', '3').replace('\x19', 's'),
'tier_8': _('[CHALLENGE_TIER_8_ROMAN]', lang),
'available': _('[AVAILABLE]', lang),
'in_soulforge': in_soulforge_text,
'n_gems': _('[GEMS_GAINED]', lang).replace('%1', '50'),
},
'date': date,
}
return result
def translate_drop_chances(self, data: dict, lang):
for key, item in data.copy().items():
if not self.is_untranslated(key):
continue
new_key = _(key, lang)
if key == '[KEYTYPE_5_TITLE]':
new_key = f'{new_key}*'
data[new_key] = item.copy()
if key != new_key:
del data[key]
if isinstance(data[new_key], dict):
self.translate_drop_chances(data[new_key], lang)
def get_drop_chances(self, lang):
drop_chances = self.drop_chances.copy()
self.translate_drop_chances(drop_chances, lang)
return drop_chances
def get_current_event(self, lang, emojis):
event = copy.deepcopy(self.weekly_event)
kingdoms = self.search_kingdom(event['kingdom_id'], lang)
if kingdoms:
event['kingdom'] = kingdoms[0]
event['name'] = event['name'].get(lang, _(EVENT_TYPES[event['type']], lang))
event['lore'] = event['lore'].get(lang, '')
event['currencies'] = [{
'name': currency['name'].get(lang, ''),
'value': _('[N_TIMES_POINTS]', lang).replace('%1', str(currency['value']))
} for currency in event['currencies']]
for stage in event['rewards'].keys():
for reward in event['rewards'][stage]['rewards']:
reward_type = reward['type']
reward['type'] = _(reward_type, lang).replace('%1', '').strip()
if reward_type == '[TITLE]':
reward['type'] += ' (' + _(f'[TITLE_{reward["data"]}]', lang) + ')'
if reward_type == '[TROOP]':
reward['type'] = _(self.troops.get(reward['data'])['name'], lang)
for item in ('token', 'badge', 'medal'):
if not event[item]:
continue
event[item] = {
'name': _(f'[WONDER_{event[item]}_NAME]', lang),
'description': _(f'[WONDER_{event[item]}_DESC]', lang),
}
def translate_restriction(r):
if isinstance(r, int):
return emojis.get(COLORS[r])
return _(r, lang)
def translate_battle(b):
result = b.copy()
result['name'] = b['names'].get(lang)
del result['names']
return result
event['restrictions'] = {_(r, lang): ', '.join([translate_restriction(i) for i in v]) for r, v in
event['restrictions'].items() if v}
event['troop'] = _(event['troop'], lang)
if event['weapon_id']:
event['weapon'] = _(self.weapons.get(event['weapon_id'], {'name': ''})['name'], lang)
new_battles = []
for battle in event['battles']:
tb = translate_battle(battle)
if tb not in new_battles:
new_battles.append(tb)
event['battles'] = new_battles
return event
def get_effects(self, lang):
positive = _('[TROOPHELP_ALLPOSITIVESTATUSEFFECTS_1]', lang)
negative = _('[TROOPHELP_ALLNEGATIVESTATUSEFFECTS_1]', lang)
result = {
positive: [],
negative: [],
}
for effect in self.effects:
key = positive if effect in self.positive_effects else negative
result[key].append({
'name': _(f'[TROOPHELP_{effect}_1]', lang),
'description': _(f'[TROOPHELP_{effect}_2]', lang),
})
result[positive] = sorted(result[positive], key=operator.itemgetter('name'))
result[negative] = sorted(result[negative], key=operator.itemgetter('name'))
return result
def get_active_gems(self):
return [g['gem_type'] for g in self.active_gems.values()]
@staticmethod
def get_storms(lang):
storms = {}
fields = {
'1': 'name',
'2': 'description',
}
p = re.compile(r'\[TROOPHELP_STORM\d+_\d+')
for key, value in t.translations[lang].items():
if not p.match(key):
continue
field = fields[key[-2]]
storm_key = key[:-2]
storms.setdefault(storm_key, {})[field] = value
return storms
def get_warbands(self, lang):
warbands = [k.copy() for k in self.kingdoms.values() if 'WARBAND' in k['reference_name']]
for warband in warbands:
self.translate_kingdom(warband, lang)
return warbands
def get_map_data(self, lang, location):
if not location:
location = 'krystara'
base_folder = 'Worldmap'
map_data = {
'krystara': {
'title': _('[MAPNAME_MAIN]', lang),
'map': f'{base_folder}/Main/Main_Albedo_full.png',
'water': f'{base_folder}/Main/Water_Main_Albedo_full.png',
'height': f'{base_folder}/Main/Main_Height_full.png',
'blend_mode': 'overlay',
},
'underworld': {
'title': _('[MAPNAME_UNDERWORLD]', lang),
'map': f'{base_folder}/Underworld/Underworld_Albedo_full.png',
'water': f'{base_folder}/Underworld/Water_Underworld_Albedo_full.png',
'height': f'{base_folder}/Underworld/Underworld_Height_full.png',
'blend_mode': 'stereo',
}
}
result = map_data[location]
result['kingdoms'] = []
result['title'] = f"Gary's Gems of War Map: {result['title']}"
def is_pseudo_kingdom(k):
return k['location'] == 'krystara' and k['links'] == {-1}
for kingdom in self.kingdoms.values():
if 'description' not in kingdom:
continue
if kingdom['location'] != location:
continue
if is_pseudo_kingdom(kingdom):
continue
my_kingdom = kingdom.copy()
self.translate_kingdom(my_kingdom, lang)
if self.is_untranslated(my_kingdom['name']):
continue
result['kingdoms'].append(my_kingdom)
return result
|
[
"logging.getLogger",
"logging.StreamHandler",
"re.compile",
"game_constants.TROOP_RARITIES.index",
"util.dig",
"game_constants.RARITY_COLORS.values",
"translations.Translations",
"copy.deepcopy",
"operator.itemgetter",
"datetime.timedelta",
"translations.LOCALE_MAPPING.get",
"util.extract_search_tag",
"models.bookmark.Bookmark",
"util.get_next_monday_in_locale",
"game_constants.COLORS.index",
"datetime.date.today",
"re.findall",
"translations.LANGUAGE_CODE_MAPPING.get",
"game_constants.WEAPON_RARITIES.index",
"datetime.datetime.utcnow",
"logging.Formatter",
"models.toplist.Toplist",
"data_source.game_data.GameData",
"util.translate_day",
"importlib.reload",
"calendar.different_locale"
] |
[((542, 605), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)-15s [%(levelname)s] %(message)s"""'], {}), "('%(asctime)-15s [%(levelname)s] %(message)s')\n", (559, 605), False, 'import logging\n'), ((616, 639), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (637, 639), False, 'import logging\n'), ((705, 732), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'import logging\n'), ((786, 813), 'translations.Translations', 'translations.Translations', ([], {}), '()\n', (811, 813), False, 'import translations\n'), ((870, 900), 'importlib.reload', 'importlib.reload', (['translations'], {}), '(translations)\n', (886, 900), False, 'import importlib\n'), ((919, 946), 'translations.Translations', 'translations.Translations', ([], {}), '()\n', (944, 946), False, 'import translations\n'), ((1014, 1024), 'data_source.game_data.GameData', 'GameData', ([], {}), '()\n', (1022, 1024), False, 'from data_source.game_data import GameData\n'), ((1921, 1930), 'models.toplist.Toplist', 'Toplist', ([], {}), '()\n', (1928, 1930), False, 'from models.toplist import Toplist\n'), ((1956, 1966), 'models.bookmark.Bookmark', 'Bookmark', ([], {}), '()\n', (1964, 1966), False, 'from models.bookmark import Bookmark\n'), ((19577, 19616), 'game_constants.WEAPON_RARITIES.index', 'WEAPON_RARITIES.index', (["weapon['rarity']"], {}), "(weapon['rarity'])\n", (19598, 19616), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((22125, 22156), 'util.extract_search_tag', 'extract_search_tag', (['search_term'], {}), '(search_term)\n', (22143, 22156), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((27805, 27826), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (27824, 27826), False, 'import datetime\n'), ((28627, 28653), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (28651, 28653), False, 'import datetime\n'), ((29581, 29602), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29600, 29602), False, 'import datetime\n'), ((31501, 31551), 'translations.LANGUAGE_CODE_MAPPING.get', 'translations.LANGUAGE_CODE_MAPPING.get', (['lang', 'lang'], {}), '(lang, lang)\n', (31539, 31551), False, 'import translations\n'), ((32401, 32448), 'util.get_next_monday_in_locale', 'get_next_monday_in_locale', ([], {'date': 'None', 'lang': 'lang'}), '(date=None, lang=lang)\n', (32426, 32448), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((36338, 36364), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (36362, 36364), False, 'import datetime\n'), ((38632, 38675), 'game_constants.WEAPON_RARITIES.index', 'WEAPON_RARITIES.index', (["new_recipe['rarity']"], {}), "(new_recipe['rarity'])\n", (38653, 38675), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((40588, 40614), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (40612, 40614), False, 'import datetime\n'), ((49022, 49054), 'copy.deepcopy', 'copy.deepcopy', (['self.weekly_event'], {}), '(self.weekly_event)\n', (49035, 49054), False, 'import copy\n'), ((52239, 52280), 're.compile', 're.compile', (['"""\\\\[TROOPHELP_STORM\\\\d+_\\\\d+"""'], {}), "('\\\\[TROOPHELP_STORM\\\\d+_\\\\d+')\n", (52249, 
52280), False, 'import re\n'), ((5857, 5888), 'util.extract_search_tag', 'extract_search_tag', (['search_term'], {}), '(search_term)\n', (5875, 5888), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((7264, 7301), 'game_constants.TROOP_RARITIES.index', 'TROOP_RARITIES.index', (["troop['rarity']"], {}), "(troop['rarity'])\n", (7284, 7301), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((11549, 11587), 'game_constants.COLORS.index', 'COLORS.index', (["kingdom['primary_color']"], {}), "(kingdom['primary_color'])\n", (11561, 11587), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((15047, 15078), 'util.extract_search_tag', 'extract_search_tag', (['search_term'], {}), '(search_term)\n', (15065, 15078), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((17543, 17574), 'util.extract_search_tag', 'extract_search_tag', (['search_term'], {}), '(search_term)\n', (17561, 17574), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((31569, 31617), 'translations.LOCALE_MAPPING.get', 'translations.LOCALE_MAPPING.get', (['locale', '"""en_GB"""'], {}), "(locale, 'en_GB')\n", (31600, 31617), False, 'import translations\n'), ((31641, 31665), 'calendar.different_locale', 'different_locale', (['locale'], {}), '(locale)\n', (31657, 31665), False, 'from calendar import different_locale\n'), ((44273, 44292), 'game_constants.COLORS.index', 'COLORS.index', (['color'], {}), '(color)\n', (44285, 44292), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((45998, 46035), 'util.get_next_monday_in_locale', 'get_next_monday_in_locale', (['date', 'lang'], {}), '(date, lang)\n', (46023, 46035), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((5920, 5952), 'util.extract_search_tag', 'extract_search_tag', (["item['name']"], {}), "(item['name'])\n", (5938, 5952), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((6195, 6223), 'operator.itemgetter', 'operator.itemgetter', (['sort_by'], {}), '(sort_by)\n', (6214, 6223), False, 'import operator\n'), ((9701, 9728), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (9720, 9728), False, 'import operator\n'), ((10460, 10487), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (10479, 10487), False, 'import operator\n'), ((10727, 10754), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (10746, 10754), False, 'import operator\n'), ((13140, 13167), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (13159, 13167), False, 'import operator\n'), ((14970, 14991), 'util.extract_search_tag', 'extract_search_tag', (['t'], {}), '(t)\n', (14988, 14991), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((15880, 15907), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (15899, 15907), False, 'import operator\n'), ((18636, 18663), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", 
(18655, 18663), False, 'import operator\n'), ((22460, 22493), 'util.extract_search_tag', 'extract_search_tag', (["affix['name']"], {}), "(affix['name'])\n", (22478, 22493), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((22524, 22564), 'util.extract_search_tag', 'extract_search_tag', (["affix['description']"], {}), "(affix['description'])\n", (22542, 22564), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((23373, 23397), 'util.extract_search_tag', 'extract_search_tag', (['name'], {}), '(name)\n', (23391, 23397), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((23474, 23501), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (23493, 23501), False, 'import operator\n'), ((24154, 24176), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (24173, 24176), False, 'import operator\n'), ((25854, 25890), 're.findall', 're.findall', (['"""\\\\{\\\\d\\\\}"""', 'description'], {}), "('\\\\{\\\\d\\\\}', description)\n", (25864, 25890), False, 'import re\n'), ((28045, 28071), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (28063, 28071), False, 'import datetime\n'), ((28492, 28520), 'operator.itemgetter', 'operator.itemgetter', (['"""start"""'], {}), "('start')\n", (28511, 28520), False, 'import operator\n'), ((37706, 37743), 'game_constants.TROOP_RARITIES.index', 'TROOP_RARITIES.index', (["entry['rarity']"], {}), "(entry['rarity'])\n", (37726, 37743), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((42371, 42393), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (42390, 42393), False, 'import operator\n'), ((51842, 51869), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (51861, 51869), False, 'import operator\n'), ((51927, 51954), 'operator.itemgetter', 'operator.itemgetter', (['"""name"""'], {}), "('name')\n", (51946, 51954), False, 'import operator\n'), ((5782, 5794), 'util.dig', 'dig', (['item', 'k'], {}), '(item, k)\n', (5785, 5794), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((44504, 44535), 'util.translate_day', 'translate_day', (['color_code', 'lang'], {}), '(color_code, lang)\n', (44517, 44535), False, 'from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day\n'), ((14610, 14646), 'game_constants.COLORS.index', 'COLORS.index', (["_class['weapon_color']"], {}), "(_class['weapon_color'])\n", (14622, 14646), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((29286, 29312), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (29304, 29312), False, 'import datetime\n'), ((36449, 36477), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(180)'}), '(days=180)\n', (36467, 36477), False, 'import datetime\n'), ((41635, 41668), 'operator.itemgetter', 'operator.itemgetter', (['"""percentage"""'], {}), "('percentage')\n", (41654, 41668), False, 'import operator\n'), ((45469, 45491), 'game_constants.RARITY_COLORS.values', 'RARITY_COLORS.values', ([], {}), '()\n', (45489, 45491), False, 'from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, 
TROOP_RARITIES, UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES\n'), ((36988, 37038), 'translations.LANGUAGE_CODE_MAPPING.get', 'translations.LANGUAGE_CODE_MAPPING.get', (['lang', 'lang'], {}), '(lang, lang)\n', (37026, 37038), False, 'import translations\n')]
|
import math
from numpy import linalg
from scipy import stats
from scipy.spatial import distance
import numpy
def euclidean(p, Q):
return numpy.apply_along_axis(lambda q: linalg.norm(p - q), 0, Q)
def hellinger(p, Q):
factor = 1 / math.sqrt(2)
sqrt_p = numpy.sqrt(p)
return factor * numpy.apply_along_axis(
lambda q: linalg.norm(sqrt_p - numpy.sqrt(q)), 0, Q
)
def jensen_shannon_distance(p, Q):
"""Square root of Jensen-Shannon divergence."""
return numpy.apply_along_axis(lambda q: distance.jensenshannon(p, q), 0, Q)
def k_directed(p, Q):
"""See: <NAME>. "Divergence Measures Based on the Shannon Entropy". 1991."""
return numpy.apply_along_axis(lambda q: stats.entropy(p, (p + q) / 2), 0, Q)
def kullback_leibler(p, Q):
return numpy.apply_along_axis(lambda q: stats.entropy(p, q), 0, Q)
def neyman_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / q), 0, Q)
def pearson_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / p), 0, Q)
def total_variation(p, Q):
return 0.5 * numpy.apply_along_axis(lambda q: linalg.norm(p - q, 1), 0, Q)
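# Illustrative usage (not part of the original module): p is a probability vector
# and each *column* of Q is a candidate distribution over the same support, so every
# function returns one value per column.  The numbers below are made up.
if __name__ == "__main__":
    p = numpy.array([0.2, 0.5, 0.3])
    Q = numpy.array([[0.1, 0.3],
                 [0.6, 0.3],
                 [0.3, 0.4]])
    print("euclidean      :", euclidean(p, Q))
    print("hellinger      :", hellinger(p, Q))
    print("jensen-shannon :", jensen_shannon_distance(p, Q))
    print("kl             :", kullback_leibler(p, Q))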
|
[
"scipy.stats.entropy",
"numpy.sqrt",
"math.sqrt",
"numpy.square",
"numpy.linalg.norm",
"scipy.spatial.distance.jensenshannon"
] |
[((269, 282), 'numpy.sqrt', 'numpy.sqrt', (['p'], {}), '(p)\n', (279, 282), False, 'import numpy\n'), ((243, 255), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (252, 255), False, 'import math\n'), ((177, 195), 'numpy.linalg.norm', 'linalg.norm', (['(p - q)'], {}), '(p - q)\n', (188, 195), False, 'from numpy import linalg\n'), ((527, 555), 'scipy.spatial.distance.jensenshannon', 'distance.jensenshannon', (['p', 'q'], {}), '(p, q)\n', (549, 555), False, 'from scipy.spatial import distance\n'), ((713, 742), 'scipy.stats.entropy', 'stats.entropy', (['p', '((p + q) / 2)'], {}), '(p, (p + q) / 2)\n', (726, 742), False, 'from scipy import stats\n'), ((824, 843), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (837, 843), False, 'from scipy import stats\n'), ((1165, 1186), 'numpy.linalg.norm', 'linalg.norm', (['(p - q)', '(1)'], {}), '(p - q, 1)\n', (1176, 1186), False, 'from numpy import linalg\n'), ((936, 955), 'numpy.square', 'numpy.square', (['(p - q)'], {}), '(p - q)\n', (948, 955), False, 'import numpy\n'), ((1054, 1073), 'numpy.square', 'numpy.square', (['(p - q)'], {}), '(p - q)\n', (1066, 1073), False, 'import numpy\n'), ((366, 379), 'numpy.sqrt', 'numpy.sqrt', (['q'], {}), '(q)\n', (376, 379), False, 'import numpy\n')]
|
import json
import math
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from pathlib import Path
from typing import List, Optional
import numpy as np
from vad.util.time_utils import (
format_timedelta_to_milliseconds,
format_timedelta_to_timecode,
parse_timecode_to_timedelta,
)
class VoiceActivityVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
class VoiceActivityMillisecondsVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
@dataclass
class Activity:
start: timedelta
end: timedelta
@dataclass
class VoiceActivity:
duration: timedelta
activities: List[Activity]
probs_sample_rate: Optional[int]
probs: Optional[List[float]]
@classmethod
def load(cls, path: Path):
with path.open() as file:
voice_activity_data = json.load(file)
return VoiceActivity.from_json(voice_activity_data)
@classmethod
def from_json(cls, voice_activity_data: dict):
version = voice_activity_data["version"]
if version == VoiceActivityVersion.v01.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityVersion.v02.value:
if voice_activity_data["time_format"] == "timecode":
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif voice_activity_data["time_format"] == "millisecond":
voice_activity = cls(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
elif version == VoiceActivityVersion.v03.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(activity["start"]),
end=parse_timecode_to_timedelta(activity["end"]),
)
for activity in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def save(self, path: Path, version: VoiceActivityVersion = VoiceActivityVersion.v03):
voice_activity_data = self.to_json(version)
with path.open("w") as file:
json.dump(voice_activity_data, file, ensure_ascii=False, indent=4)
def to_json(self, version: VoiceActivityVersion = VoiceActivityVersion.v03):
if version == VoiceActivityVersion.v01:
voice_activity_formatted = {
"version": VoiceActivityVersion.v01.value,
"duration": format_timedelta_to_timecode(self.duration),
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v02:
voice_activity_formatted = {
"version": VoiceActivityVersion.v02.value,
"duration": format_timedelta_to_timecode(self.duration),
"time_format": "timecode",
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v03:
voice_activity_formatted = {
"version": VoiceActivityVersion.v03.value,
"duration": format_timedelta_to_timecode(self.duration),
"activities": [
{
"start": format_timedelta_to_timecode(activity.start),
"end": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_formatted
def to_milliseconds(
self, version: VoiceActivityMillisecondsVersion = VoiceActivityMillisecondsVersion.v03
):
if version == VoiceActivityMillisecondsVersion.v02:
voice_activity_milliseconds = {
"version": version.value,
"duration": format_timedelta_to_milliseconds(self.duration),
"time_format": "millisecond",
"voice_activity": [
{
"start_time": format_timedelta_to_milliseconds(activity.start),
"end_time": format_timedelta_to_milliseconds(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityMillisecondsVersion.v03:
voice_activity_milliseconds = {
"version": version.value,
"duration": {"total_milliseconds": format_timedelta_to_milliseconds(self.duration)},
"activities": [
{
"start": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.start)
},
"end": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.end)
},
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_milliseconds
@classmethod
def from_milliseconds(cls, voice_activity_data: dict):
version = voice_activity_data["version"] # version of milliseconds format
if version == VoiceActivityMillisecondsVersion.v02.value:
voice_activity = VoiceActivity(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityMillisecondsVersion.v03.value:
voice_activity = VoiceActivity(
duration=timedelta(
milliseconds=voice_activity_data["duration"]["total_milliseconds"]
),
activities=[
Activity(
start=timedelta(milliseconds=segment["start"]["total_milliseconds"]),
end=timedelta(milliseconds=segment["end"]["total_milliseconds"]),
)
for segment in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
    def to_labels(self, sample_rate: int) -> np.ndarray:
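        # Rasterize the activity segments into a per-sample 0/1 label array at the given sample rate.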
total_samples = int(self.duration.total_seconds() * sample_rate)
labels = np.zeros(total_samples, dtype=np.long)
for activity in self.activities:
start_sample = int(activity.start.total_seconds() * sample_rate)
end_sample = int(activity.end.total_seconds() * sample_rate)
labels[start_sample:end_sample] = 1
return labels
|
[
"vad.util.time_utils.parse_timecode_to_timedelta",
"vad.util.time_utils.format_timedelta_to_milliseconds",
"numpy.zeros",
"json.load",
"datetime.timedelta",
"json.dump",
"vad.util.time_utils.format_timedelta_to_timecode"
] |
[((10019, 10057), 'numpy.zeros', 'np.zeros', (['total_samples'], {'dtype': 'np.long'}), '(total_samples, dtype=np.long)\n', (10027, 10057), True, 'import numpy as np\n'), ((863, 878), 'json.load', 'json.load', (['file'], {}), '(file)\n', (872, 878), False, 'import json\n'), ((4223, 4289), 'json.dump', 'json.dump', (['voice_activity_data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(voice_activity_data, file, ensure_ascii=False, indent=4)\n', (4232, 4289), False, 'import json\n'), ((4548, 4591), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (4576, 4591), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6743, 6790), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['self.duration'], {}), '(self.duration)\n', (6775, 6790), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1170, 1230), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (1197, 1230), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5198, 5241), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (5226, 5241), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((8489, 8544), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']"}), "(milliseconds=voice_activity_data['duration'])\n", (8498, 8544), False, 'from datetime import timedelta\n'), ((4689, 4733), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (4717, 4733), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((4771, 4813), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (4799, 4813), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5891, 5934), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (5919, 5934), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6934, 6982), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.start'], {}), '(activity.start)\n', (6966, 6982), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7020, 7066), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.end'], {}), '(activity.end)\n', (7052, 7066), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7472, 7519), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', 
(['self.duration'], {}), '(self.duration)\n', (7504, 7519), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((9171, 9248), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']['total_milliseconds']"}), "(milliseconds=voice_activity_data['duration']['total_milliseconds'])\n", (9180, 9248), False, 'from datetime import timedelta\n'), ((1918, 1978), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (1945, 1978), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3415, 3475), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (3442, 3475), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5382, 5426), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (5410, 5426), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5464, 5506), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (5492, 5506), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1321, 1376), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['start_time']"], {}), "(speech_block['start_time'])\n", (1348, 1376), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1406, 1459), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['end_time']"], {}), "(speech_block['end_time'])\n", (1433, 1459), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2655, 2710), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']"}), "(milliseconds=voice_activity_data['duration'])\n", (2664, 2710), False, 'from datetime import timedelta\n'), ((6023, 6067), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (6051, 6067), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6100, 6142), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (6128, 6142), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7661, 7709), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.start'], {}), '(activity.start)\n', (7693, 7709), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7820, 7866), 'vad.util.time_utils.format_timedelta_to_milliseconds', 
'format_timedelta_to_milliseconds', (['activity.end'], {}), '(activity.end)\n', (7852, 7866), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((8635, 8685), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['start_time']"}), "(milliseconds=speech_block['start_time'])\n", (8644, 8685), False, 'from datetime import timedelta\n'), ((8715, 8763), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['end_time']"}), "(milliseconds=speech_block['end_time'])\n", (8724, 8763), False, 'from datetime import timedelta\n'), ((9377, 9439), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "segment['start']['total_milliseconds']"}), "(milliseconds=segment['start']['total_milliseconds'])\n", (9386, 9439), False, 'from datetime import timedelta\n'), ((9469, 9529), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "segment['end']['total_milliseconds']"}), "(milliseconds=segment['end']['total_milliseconds'])\n", (9478, 9529), False, 'from datetime import timedelta\n'), ((2081, 2136), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['start_time']"], {}), "(speech_block['start_time'])\n", (2108, 2136), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2170, 2223), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['end_time']"], {}), "(speech_block['end_time'])\n", (2197, 2223), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3566, 3612), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["activity['start']"], {}), "(activity['start'])\n", (3593, 3612), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3642, 3686), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["activity['end']"], {}), "(activity['end'])\n", (3669, 3686), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2813, 2863), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['start_time']"}), "(milliseconds=speech_block['start_time'])\n", (2822, 2863), False, 'from datetime import timedelta\n'), ((2897, 2945), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['end_time']"}), "(milliseconds=speech_block['end_time'])\n", (2906, 2945), False, 'from datetime import timedelta\n')]
|
import sys
import urllib.parse as urlparse
print("Argumentos recibidos por STDIN: ")
try:
for line in sys.stdin:
url = 'foo.com/?' + line
parsed = urlparse.urlparse(url)
        print('Received: {}'.format(urlparse.parse_qs(parsed.query)))
except:
ignorar = True
|
[
"urllib.parse.parse_qs",
"urllib.parse.urlparse"
] |
[((177, 199), 'urllib.parse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (194, 199), True, 'import urllib.parse as urlparse\n'), ((239, 270), 'urllib.parse.parse_qs', 'urlparse.parse_qs', (['parsed.query'], {}), '(parsed.query)\n', (256, 270), True, 'import urllib.parse as urlparse\n')]
|
# -------------------------------------------------------------------
# Copyright 2021 Virtex authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# -------------------------------------------------------------------
import asyncio
from functools import wraps
from typing import Callable, Any
from virtex.core.timing import now, async_now
def profile(profile_fn,
*fn_args,
tstamp_fn: Callable[[float, float], Any],
loop: asyncio.BaseEventLoop = None):
"""
Parameters
----------
profile_fn: ``Callable[Any, Any]``
Wrapped function
fn_args: ``Tuple[Any]``
Wrapped function arguments
tstamp_fn: ``Callable[[float, float], Any]``
A function that accepts a start_time,end_time
argument pair and returns the profile value
loop: ``Optional[asyncio.BaseEventLoop]``
Event loop to be used for async functions
"""
def _execute(func):
@wraps(func)
async def timeit_async(*args, **kwargs):
start_time = async_now(loop)
result = await func(*args, **kwargs)
end_time = async_now(loop)
profile_fn(*fn_args, tstamp_fn(start_time, end_time))
return result
@wraps(func)
def timeit(*args, **kwargs):
start_time = now()
result = func(*args, **kwargs)
end_time = now()
profile_fn(*fn_args, tstamp_fn(start_time, end_time))
return result
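        # Coroutine functions get the async wrapper, which needs an explicit event loop for timestamps.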
if asyncio.iscoroutinefunction(func):
assert loop is not None
return timeit_async
return timeit
return _execute
|
[
"virtex.core.timing.async_now",
"asyncio.iscoroutinefunction",
"functools.wraps",
"virtex.core.timing.now"
] |
[((1475, 1486), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1480, 1486), False, 'from functools import wraps\n'), ((1767, 1778), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1772, 1778), False, 'from functools import wraps\n'), ((2023, 2056), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (2050, 2056), False, 'import asyncio\n'), ((1561, 1576), 'virtex.core.timing.async_now', 'async_now', (['loop'], {}), '(loop)\n', (1570, 1576), False, 'from virtex.core.timing import now, async_now\n'), ((1649, 1664), 'virtex.core.timing.async_now', 'async_now', (['loop'], {}), '(loop)\n', (1658, 1664), False, 'from virtex.core.timing import now, async_now\n'), ((1841, 1846), 'virtex.core.timing.now', 'now', ([], {}), '()\n', (1844, 1846), False, 'from virtex.core.timing import now, async_now\n'), ((1913, 1918), 'virtex.core.timing.now', 'now', ([], {}), '()\n', (1916, 1918), False, 'from virtex.core.timing import now, async_now\n')]
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import torch
from Putil.torch.indicator.vision.object_detection import box
##@brief compute the IoU
# @note
# @return
def _iou(x11, y11, x12, y12, x21, y21, x22, y22):
cap, cup = box._cap_cup(x11, y11, x12, y12, x21, y21, x22, y22)
return cap / cup
def _cap_cup_iou(cap, cup):
return cap / cup
##@brief compute IoU over tensors shaped [batch, box, ...], where each box is [top_left_x, top_left_y, width, height];
# the result is [batch, 1, ...], with the second dimension holding the IoU value; cells without a gt_box are represented by [0, 0, 0, 0],
# so different IoU variants yield different values when no ground truth is present, which must be stated explicitly. **In general, computing the MeanIoU of a batch requires
# ...
# @note
class iou(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
    ##@brief return the index of this object's exact IoU value; some implementations return multiple outputs (intermediate data, the base IoU, etc.), so this interface conveniently exposes the index of the corresponding IoU
    # @return int index
@abstractmethod
def iou_index(self):
pass
@abstractmethod
def iou_mean(self, iou):
pass
class MeanIoU(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
def forward(self, iou, obj_gt):
iou_filtered = iou * obj_gt
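        # Average the IoU over cells that actually contain a ground-truth box, ignoring NaN entries.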
iou = torch.nansum(iou_filtered) / ((torch.isnan(iou_filtered).eq(False) * obj_gt).sum() + 1e-32)
return iou
##@brief
# @note
class IoU(iou):
def iou_index(self):
return 0
def __init__(self):
iou.__init__(self)
pass
def forward(self, box1, box2):
box1 = box._tlwh_to_tlbr(box1)
box2 = box._tlwh_to_tlbr(box2)
x11, y11, x12, y12 = box._to_xyxy(box1)
x21, y21, x22, y22 = box._to_xyxy(box2)
iou = _iou(x11, y11, x12, y12, x21, y21, x22, y22)
return iou,
|
[
"Putil.torch.indicator.vision.object_detection.box._cap_cup",
"Putil.torch.indicator.vision.object_detection.box._tlwh_to_tlbr",
"torch.nansum",
"torch.isnan",
"torch.nn.Module.__init__",
"Putil.torch.indicator.vision.object_detection.box._to_xyxy"
] |
[((232, 284), 'Putil.torch.indicator.vision.object_detection.box._cap_cup', 'box._cap_cup', (['x11', 'y11', 'x12', 'y12', 'x21', 'y21', 'x22', 'y22'], {}), '(x11, y11, x12, y12, x21, y21, x22, y22)\n', (244, 284), False, 'from Putil.torch.indicator.vision.object_detection import box\n'), ((652, 682), 'torch.nn.Module.__init__', 'torch.nn.Module.__init__', (['self'], {}), '(self)\n', (676, 682), False, 'import torch\n'), ((982, 1012), 'torch.nn.Module.__init__', 'torch.nn.Module.__init__', (['self'], {}), '(self)\n', (1006, 1012), False, 'import torch\n'), ((1425, 1448), 'Putil.torch.indicator.vision.object_detection.box._tlwh_to_tlbr', 'box._tlwh_to_tlbr', (['box1'], {}), '(box1)\n', (1442, 1448), False, 'from Putil.torch.indicator.vision.object_detection import box\n'), ((1464, 1487), 'Putil.torch.indicator.vision.object_detection.box._tlwh_to_tlbr', 'box._tlwh_to_tlbr', (['box2'], {}), '(box2)\n', (1481, 1487), False, 'from Putil.torch.indicator.vision.object_detection import box\n'), ((1517, 1535), 'Putil.torch.indicator.vision.object_detection.box._to_xyxy', 'box._to_xyxy', (['box1'], {}), '(box1)\n', (1529, 1535), False, 'from Putil.torch.indicator.vision.object_detection import box\n'), ((1565, 1583), 'Putil.torch.indicator.vision.object_detection.box._to_xyxy', 'box._to_xyxy', (['box2'], {}), '(box2)\n', (1577, 1583), False, 'from Putil.torch.indicator.vision.object_detection import box\n'), ((1113, 1139), 'torch.nansum', 'torch.nansum', (['iou_filtered'], {}), '(iou_filtered)\n', (1125, 1139), False, 'import torch\n'), ((1144, 1169), 'torch.isnan', 'torch.isnan', (['iou_filtered'], {}), '(iou_filtered)\n', (1155, 1169), False, 'import torch\n')]
|
# today is 389f
# the python pit
# magPi - 05
# MOUNTAINS
import os, pygame; from pygame.locals import *
pygame.init(); clock = pygame.time.Clock()
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Mountains")
screen=pygame.display.set_mode([600,382],0,32)
sky = pygame.Surface((600,255))
r=0; g=64; b=128
for l in range (0,255):
pygame.draw.rect(sky,(r,g,b),(0,l-1,600,l))
r=r+1;g=g+1;b=b+1
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
ground = pygame.Surface((600,128))
r=192; g=255; b=192
for l in range (0,128):
pygame.draw.rect(ground,(r,g,b),(0,l-2,600,l))
r=r-2;g=g-2;b=b-2
if r<=0: r=0
if g<=0: g=0
if b<=0: b=0
# Add in an extra surface for the mountains
mountain = pygame.Surface((600,128))
mountain.set_colorkey([0,0,0]) # Black is transparent
r=96; g=64; b=255
for l in range (0,128):
pygame.draw.rect(mountain,(r,g,b),(0,l-2,600,l))
r=r+2;g=g+2;b=b+2
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
# Draw some black (Transparent) polygons to create mountain peaks
# The screen is 600 wide so I've drawn 10 polygons at 60 pixels wide each
pygame.draw.polygon(mountain,[0,0,0],[(0,0),(60,0),(60,10),(0,40)])
pygame.draw.polygon(mountain,[0,0,0],[(60,0),(120,0),(120,30),(60,10)])
pygame.draw.polygon(mountain,[0,0,0],[(120,0),(180,0),(180,20),(120,30)])
pygame.draw.polygon(mountain,[0,0,0],[(180,0),(240,0),(240,50),(180,20)])
pygame.draw.polygon(mountain,[0,0,0],[(240,0),(300,0),(300,40),(240,50)])
pygame.draw.polygon(mountain,[0,0,0],[(300,0),(360,0),(360,10),(300,40)])
pygame.draw.polygon(mountain,[0,0,0],[(360,0),(420,0),(420,35),(360,10)])
pygame.draw.polygon(mountain,[0,0,0],[(420,0),(480,0),(480,45),(420,35)])
pygame.draw.polygon(mountain,[0,0,0],[(480,0),(540,0),(540,42),(480,45)])
pygame.draw.polygon(mountain,[0,0,0],[(540,0),(600,0),(600,15),(540,42)])
screen.blit(sky,(0,0))
screen.blit(ground,(0,255))
screen.blit(mountain,(0,128))
pygame.display.update()
pygame.time.wait(30000)
|
[
"pygame.draw.polygon",
"pygame.init",
"pygame.Surface",
"pygame.time.wait",
"pygame.display.set_mode",
"pygame.time.Clock",
"pygame.draw.rect",
"pygame.display.set_caption",
"pygame.display.update"
] |
[((107, 120), 'pygame.init', 'pygame.init', ([], {}), '()\n', (118, 120), False, 'import os, pygame\n'), ((130, 149), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (147, 149), False, 'import os, pygame\n'), ((196, 235), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Mountains"""'], {}), "('Mountains')\n", (222, 235), False, 'import os, pygame\n'), ((243, 285), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[600, 382]', '(0)', '(32)'], {}), '([600, 382], 0, 32)\n', (266, 285), False, 'import os, pygame\n'), ((290, 316), 'pygame.Surface', 'pygame.Surface', (['(600, 255)'], {}), '((600, 255))\n', (304, 316), False, 'import os, pygame\n'), ((500, 526), 'pygame.Surface', 'pygame.Surface', (['(600, 128)'], {}), '((600, 128))\n', (514, 526), False, 'import os, pygame\n'), ((750, 776), 'pygame.Surface', 'pygame.Surface', (['(600, 128)'], {}), '((600, 128))\n', (764, 776), False, 'import os, pygame\n'), ((1151, 1229), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(0, 0), (60, 0), (60, 10), (0, 40)]'], {}), '(mountain, [0, 0, 0], [(0, 0), (60, 0), (60, 10), (0, 40)])\n', (1170, 1229), False, 'import os, pygame\n'), ((1219, 1305), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(60, 0), (120, 0), (120, 30), (60, 10)]'], {}), '(mountain, [0, 0, 0], [(60, 0), (120, 0), (120, 30), (60,\n 10)])\n', (1238, 1305), False, 'import os, pygame\n'), ((1291, 1380), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(120, 0), (180, 0), (180, 20), (120, 30)]'], {}), '(mountain, [0, 0, 0], [(120, 0), (180, 0), (180, 20), (\n 120, 30)])\n', (1310, 1380), False, 'import os, pygame\n'), ((1365, 1454), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(180, 0), (240, 0), (240, 50), (180, 20)]'], {}), '(mountain, [0, 0, 0], [(180, 0), (240, 0), (240, 50), (\n 180, 20)])\n', (1384, 1454), False, 'import os, pygame\n'), ((1439, 1528), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(240, 0), (300, 0), (300, 40), (240, 50)]'], {}), '(mountain, [0, 0, 0], [(240, 0), (300, 0), (300, 40), (\n 240, 50)])\n', (1458, 1528), False, 'import os, pygame\n'), ((1513, 1602), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(300, 0), (360, 0), (360, 10), (300, 40)]'], {}), '(mountain, [0, 0, 0], [(300, 0), (360, 0), (360, 10), (\n 300, 40)])\n', (1532, 1602), False, 'import os, pygame\n'), ((1587, 1676), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(360, 0), (420, 0), (420, 35), (360, 10)]'], {}), '(mountain, [0, 0, 0], [(360, 0), (420, 0), (420, 35), (\n 360, 10)])\n', (1606, 1676), False, 'import os, pygame\n'), ((1661, 1750), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(420, 0), (480, 0), (480, 45), (420, 35)]'], {}), '(mountain, [0, 0, 0], [(420, 0), (480, 0), (480, 45), (\n 420, 35)])\n', (1680, 1750), False, 'import os, pygame\n'), ((1735, 1824), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(480, 0), (540, 0), (540, 42), (480, 45)]'], {}), '(mountain, [0, 0, 0], [(480, 0), (540, 0), (540, 42), (\n 480, 45)])\n', (1754, 1824), False, 'import os, pygame\n'), ((1809, 1898), 'pygame.draw.polygon', 'pygame.draw.polygon', (['mountain', '[0, 0, 0]', '[(540, 0), (600, 0), (600, 15), (540, 42)]'], {}), '(mountain, [0, 0, 0], [(540, 0), (600, 0), (600, 15), (\n 540, 42)])\n', (1828, 1898), False, 'import os, pygame\n'), ((1965, 1988), 
'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1986, 1988), False, 'import os, pygame\n'), ((1989, 2012), 'pygame.time.wait', 'pygame.time.wait', (['(30000)'], {}), '(30000)\n', (2005, 2012), False, 'import os, pygame\n'), ((361, 413), 'pygame.draw.rect', 'pygame.draw.rect', (['sky', '(r, g, b)', '(0, l - 1, 600, l)'], {}), '(sky, (r, g, b), (0, l - 1, 600, l))\n', (377, 413), False, 'import os, pygame\n'), ((574, 629), 'pygame.draw.rect', 'pygame.draw.rect', (['ground', '(r, g, b)', '(0, l - 2, 600, l)'], {}), '(ground, (r, g, b), (0, l - 2, 600, l))\n', (590, 629), False, 'import os, pygame\n'), ((876, 933), 'pygame.draw.rect', 'pygame.draw.rect', (['mountain', '(r, g, b)', '(0, l - 2, 600, l)'], {}), '(mountain, (r, g, b), (0, l - 2, 600, l))\n', (892, 933), False, 'import os, pygame\n')]
|
"""
Handles reports
"""
from parkour.env import env
from parkour.utils import normalize_name
import asyncio
import aiotfm
import time
class Reports(aiotfm.Client):
def __init__(self, *args, **kwargs):
self.rep_id = 0
self.reports = {}
self.reported = []
self.reporters = []
super().__init__(*args, **kwargs)
self.loop.create_task(self.check_reports())
async def check_reports(self):
while not self.main.open:
await asyncio.sleep(3.0)
while self.main.open:
now = time.time()
to_remove = []
for report, data in self.reports.items():
# reporter, reported, sent to discord,
# discord date, expiration date
if not data[2] and now >= data[3]:
data[2] = True
await self.report_discord(report)
elif now >= data[4]: # expired
self.reported.remove(data[1])
to_remove.append(report)
await self.mod_chat.channel.send(
"Report id {} has expired.".format(report)
)
for report in to_remove:
del self.reports[report]
await asyncio.sleep(30.0)
async def report_discord(self, report):
reporter, reported = self.reports[report][:2]
file = await self.load_player_file(reported)
if file is None:
room = "unknown"
else:
room = file["room"]
await self.send_channel(
env.report_channel,
"@everyone `{}` reported `{}` (room: `{}`, report id: `{}`). "
"Connect to the game and use the handle command in modchat."
.format(reporter, reported, room, report)
)
def report_cooldown(self, name):
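        # Prune expired reporter entries and rate-limit: True when the author already has two or more recent reports.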
reports = 0
remove_until = -1
now = time.time()
for index, (expire, reporter) in enumerate(self.reporters):
if now >= expire:
remove_until = index
elif reporter == name:
reports += 1
if remove_until >= 0:
del self.reporters[:remove_until + 1]
if reports >= 2:
return True
return False
async def on_channel_command(self, channel, name, author, ranks, cmd, args):
if name == "mod":
if cmd == "handle":
if (not ranks["admin"]
and not ranks["mod"]
and not ranks["trainee"]):
return True
if not args or not args[0].isdigit():
await channel.send("Usage: .handle [id] (silent?)")
return True
rep_id = int(args[0])
if len(args) > 1:
silent = args[1].lower() in ("silent", "silence", "s")
else:
silent = False
if rep_id not in self.reports:
return await channel.send("Report id {} not found".format(rep_id))
report = self.reports[rep_id]
del self.reports[rep_id]
file = await self.load_player_file(report[1])
if file is None:
extra = "Could not get reported player information."
else:
extra = "Sent you the player's room in whispers."
await self.whisper(
author,
"{}'s room: {}".format(report[1], file["room"])
)
await channel.send(
"{} will be handling the report {}. {}"
.format(author, rep_id, extra)
)
if not silent:
await self.whisper(
report[0],
"A parkour moderator is now handling your report."
)
else:
return False
else:
return False
return True
async def on_whisper_command(self, whisper, author, ranks, cmd, args):
if await super().on_whisper_command(
whisper, author, ranks, cmd, args
):
return True
if cmd == "norep":
if not ranks["admin"] and not ranks["mod"]:
return True
if not args:
await whisper.reply("Usage: .norep Username#0000")
return True
target = normalize_name(args[0])
pid, name, online = await self.get_player_info(target)
if name is None or not online:
await whisper.reply("That player ({}) is not online.".format(target))
return True
file = await self.load_player_file(name, online_check=False)
if file is None:
await whisper.reply("Could not load {}'s file.".format(name))
return True
file["report"] = not file["report"]
if not await self.save_player_file(
name, file, "report", online_check=False
):
await whisper.reply("Could not modify {}'s file.".format(name))
return True
action = "enabled" if file["report"] else "disabled"
await self.send_webhook(
env.webhooks.sanctions,
"**`[NOREP]:`** `{}` has {} reports from `{}` (ID: `{}`)"
.format(author, action, name, pid)
)
await whisper.reply(
"Reports from {} (ID: {}) have been {}."
.format(name, pid, action)
)
elif cmd == "report":
# Argument check
if not args:
await whisper.reply("Usage: .report Username#0000")
return True
reported = normalize_name(args[0])
if reported == author:
await whisper.reply("Why are you trying to report yourself?")
return True
pid, name, online = await self.get_player_info(reported)
if name is None or not online:
await whisper.reply("That player ({}) is not online.".format(reported))
return True
await whisper.reply("Your report of the player {} will be handled shortly.".format(reported))
# Player information check
if self.report_cooldown(author):
return True
if reported in self.reported:
return True
file = await self.load_player_file(author, online_check=False)
if file is None or not file["report"]:
return True
file = await self.load_player_file(reported, online_check=False)
if file is None:
return True
now = self.tfm_time()
if now < file.get("killed", 0):
return
ban = file.get("banned", 0)
if ban == 2 or now < ban:
return True
# Create report
report = self.rep_id
self.rep_id += 1
online = len(self.mod_chat.players) - 1
now = time.time()
self.reports[report] = [
author, reported, online == 0,
now + 60 * 5, now + 60 * 30
]
self.reported.append(reported)
self.reporters.append((now + 60 * 5, author))
if online == 0:
await self.report_discord(report)
else:
await self.mod_chat.channel.send(
"{} reported {} (report id: {}) (room: {}) "
"(use the handle command here before handling it)"
.format(author, reported, report, file["room"])
)
else:
return False
return True
|
[
"parkour.utils.normalize_name",
"time.time",
"asyncio.sleep"
] |
[((1546, 1557), 'time.time', 'time.time', ([], {}), '()\n', (1555, 1557), False, 'import time\n'), ((493, 504), 'time.time', 'time.time', ([], {}), '()\n', (502, 504), False, 'import time\n'), ((3432, 3455), 'parkour.utils.normalize_name', 'normalize_name', (['args[0]'], {}), '(args[0])\n', (3446, 3455), False, 'from parkour.utils import normalize_name\n'), ((440, 458), 'asyncio.sleep', 'asyncio.sleep', (['(3.0)'], {}), '(3.0)\n', (453, 458), False, 'import asyncio\n'), ((1011, 1030), 'asyncio.sleep', 'asyncio.sleep', (['(30.0)'], {}), '(30.0)\n', (1024, 1030), False, 'import asyncio\n'), ((4491, 4514), 'parkour.utils.normalize_name', 'normalize_name', (['args[0]'], {}), '(args[0])\n', (4505, 4514), False, 'from parkour.utils import normalize_name\n'), ((5537, 5548), 'time.time', 'time.time', ([], {}), '()\n', (5546, 5548), False, 'import time\n')]
|
from joblib import load
from os.path import join
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mvmm_sim.simulation.sim_viz import save_fig
from mvmm_sim.data_analysis.utils import load_data
from mvmm_sim.simulation.utils import make_and_get_dir
from mvmm_sim.mouse_et.MouseETPaths import MouseETPaths
from mvmm_sim.mouse_et.raw_ephys_loading import load_raw_ephys
from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data,\
plot_top_clust_ephys_curves, plot_cluster_ephys_curve
parser = argparse.ArgumentParser(description='Cluster interpretation.')
parser.add_argument('--results_dir', default=None,
help='Directory to save results.')
parser.add_argument('--fpaths', nargs='+',
help='Paths to data sets.')
args = parser.parse_args()
inches = 8
n_top_clust = 10
results_dir = args.results_dir
fpaths = args.fpaths
fitting_dir = join(results_dir, 'model_fitting')
ephys_viz_dir = join(results_dir, 'interpret', 'bd_mvmm', 'ephys_pca_feats')
# load models and data
models = load(join(fitting_dir, 'selected_models'))
view_data, dataset_names, sample_names, view_feat_names = load_data(*fpaths)
# load raw ephys data
orig_data_dir = join(MouseETPaths().raw_data_dir, 'inh_patchseq_spca_files',
'orig_data_csv')
ephys_raw = load_raw_ephys(orig_data_dir, concat=False)
for k in ephys_raw.keys():
ephys_raw[k] = ephys_raw[k].loc[sample_names]
print(k, ephys_raw[k].shape)
n_datasets = len(ephys_raw)
# get data for plotting
v = 1
cluster_super_means, super_data_means, super_data_stds, y_cnts = \
get_ephys_super_data(model=models['bd_mvmm'].final_.view_models_[v],
fit_data=view_data[v],
ephys_raw=ephys_raw)
clust_labels = ['cluster_{}'.format(cl_idx + 1)
for cl_idx in range(len(y_cnts))]
# plot top several clusters
plot_top_clust_ephys_curves(cluster_super_means,
y_cnts=y_cnts,
overall_means=super_data_means,
overall_stds=super_data_stds,
clust_labels=clust_labels,
n_to_show=n_top_clust,
inches=inches)
save_fig(join(ephys_viz_dir, 'ephys_curves_top_clust.png'))
# plot each (non-trival) cluster
# non_trivial_clusters = y_cnts[y_cnts >= 5].index.values
non_trivial_clusters = y_cnts[y_cnts >= 0].index.values
save_dir = make_and_get_dir(ephys_viz_dir, 'cluster_curves')
for cl_idx in non_trivial_clusters:
label = clust_labels[cl_idx]
values = {}
for name in cluster_super_means.keys():
values[name] = cluster_super_means[name][cl_idx]
plt.figure(figsize=(2 * n_datasets * inches, inches))
plot_cluster_ephys_curve(values,
overall_means=super_data_means,
overall_stds=super_data_stds,
y_label=label)
save_fig(join(save_dir, '{}_ephys_curve.png'.format(label)))
|
[
"mvmm_sim.mouse_et.raw_ephys_loading.load_raw_ephys",
"mvmm_sim.simulation.utils.make_and_get_dir",
"mvmm_sim.mouse_et.ephys_viz.plot_cluster_ephys_curve",
"mvmm_sim.data_analysis.utils.load_data",
"argparse.ArgumentParser",
"mvmm_sim.mouse_et.MouseETPaths.MouseETPaths",
"os.path.join",
"mvmm_sim.mouse_et.ephys_viz.get_ephys_super_data",
"matplotlib.pyplot.figure",
"mvmm_sim.mouse_et.ephys_viz.plot_top_clust_ephys_curves"
] |
[((523, 585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Cluster interpretation."""'}), "(description='Cluster interpretation.')\n", (546, 585), False, 'import argparse\n'), ((915, 949), 'os.path.join', 'join', (['results_dir', '"""model_fitting"""'], {}), "(results_dir, 'model_fitting')\n", (919, 949), False, 'from os.path import join\n'), ((966, 1026), 'os.path.join', 'join', (['results_dir', '"""interpret"""', '"""bd_mvmm"""', '"""ephys_pca_feats"""'], {}), "(results_dir, 'interpret', 'bd_mvmm', 'ephys_pca_feats')\n", (970, 1026), False, 'from os.path import join\n'), ((1162, 1180), 'mvmm_sim.data_analysis.utils.load_data', 'load_data', (['*fpaths'], {}), '(*fpaths)\n', (1171, 1180), False, 'from mvmm_sim.data_analysis.utils import load_data\n'), ((1332, 1375), 'mvmm_sim.mouse_et.raw_ephys_loading.load_raw_ephys', 'load_raw_ephys', (['orig_data_dir'], {'concat': '(False)'}), '(orig_data_dir, concat=False)\n', (1346, 1375), False, 'from mvmm_sim.mouse_et.raw_ephys_loading import load_raw_ephys\n'), ((1617, 1733), 'mvmm_sim.mouse_et.ephys_viz.get_ephys_super_data', 'get_ephys_super_data', ([], {'model': "models['bd_mvmm'].final_.view_models_[v]", 'fit_data': 'view_data[v]', 'ephys_raw': 'ephys_raw'}), "(model=models['bd_mvmm'].final_.view_models_[v],\n fit_data=view_data[v], ephys_raw=ephys_raw)\n", (1637, 1733), False, 'from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data, plot_top_clust_ephys_curves, plot_cluster_ephys_curve\n'), ((1909, 2107), 'mvmm_sim.mouse_et.ephys_viz.plot_top_clust_ephys_curves', 'plot_top_clust_ephys_curves', (['cluster_super_means'], {'y_cnts': 'y_cnts', 'overall_means': 'super_data_means', 'overall_stds': 'super_data_stds', 'clust_labels': 'clust_labels', 'n_to_show': 'n_top_clust', 'inches': 'inches'}), '(cluster_super_means, y_cnts=y_cnts,\n overall_means=super_data_means, overall_stds=super_data_stds,\n clust_labels=clust_labels, n_to_show=n_top_clust, inches=inches)\n', (1936, 2107), False, 'from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data, plot_top_clust_ephys_curves, plot_cluster_ephys_curve\n'), ((2488, 2537), 'mvmm_sim.simulation.utils.make_and_get_dir', 'make_and_get_dir', (['ephys_viz_dir', '"""cluster_curves"""'], {}), "(ephys_viz_dir, 'cluster_curves')\n", (2504, 2537), False, 'from mvmm_sim.simulation.utils import make_and_get_dir\n'), ((1066, 1102), 'os.path.join', 'join', (['fitting_dir', '"""selected_models"""'], {}), "(fitting_dir, 'selected_models')\n", (1070, 1102), False, 'from os.path import join\n'), ((2278, 2327), 'os.path.join', 'join', (['ephys_viz_dir', '"""ephys_curves_top_clust.png"""'], {}), "(ephys_viz_dir, 'ephys_curves_top_clust.png')\n", (2282, 2327), False, 'from os.path import join\n'), ((2731, 2784), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * n_datasets * inches, inches)'}), '(figsize=(2 * n_datasets * inches, inches))\n', (2741, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2903), 'mvmm_sim.mouse_et.ephys_viz.plot_cluster_ephys_curve', 'plot_cluster_ephys_curve', (['values'], {'overall_means': 'super_data_means', 'overall_stds': 'super_data_stds', 'y_label': 'label'}), '(values, overall_means=super_data_means,\n overall_stds=super_data_stds, y_label=label)\n', (2814, 2903), False, 'from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data, plot_top_clust_ephys_curves, plot_cluster_ephys_curve\n'), ((1226, 1240), 'mvmm_sim.mouse_et.MouseETPaths.MouseETPaths', 'MouseETPaths', ([], {}), '()\n', (1238, 1240), False, 'from 
mvmm_sim.mouse_et.MouseETPaths import MouseETPaths\n')]
|
#!/usr/bin/env python
# Copyright (c) 2009-2016 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from __future__ import print_function
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.motif import pwmfile_to_motifs, Motif
from gimmemotifs.plot import match_plot
def match(args):
sample = dict([(m.id, m) for m in pwmfile_to_motifs(args.pwmfile)])
db = dict([(m.id, m) for m in pwmfile_to_motifs(args.dbpwmfile)])
mc = MotifComparer()
result = mc.get_closest_match(sample.values(), db.values(), "partial", "wic", "mean")
print("Motif\tMatch\tScore\tP-value")
for motif, match in result.items():
pval, pos, orient = mc.compare_motifs(sample[motif], db[match[0]], "partial", "wic", "mean", pval=True)
print("%s\t%s\t%0.2f\t%0.3e" % (motif, match[0], match[1][0], pval))
if args.img:
plotdata = []
for query, match in result.items():
motif = sample[query]
dbmotif = db[match[0]]
pval, pos, orient = mc.compare_motifs(motif, dbmotif, "partial", "wic", "mean", pval=True)
if orient == -1:
tmp = dbmotif.id
dbmotif = dbmotif.rc()
dbmotif.id = tmp
if pos < 0:
tmp = motif.id
motif = Motif([[0.25,0.25,0.25,0.25]] * -pos + motif.pwm)
motif.id = tmp
elif pos > 0:
tmp = dbmotif.id
dbmotif = Motif([[0.25,0.25,0.25,0.25]] * pos + dbmotif.pwm)
dbmotif.id = tmp
plotdata.append((motif, dbmotif, pval))
match_plot(plotdata, args.img)
|
[
"gimmemotifs.motif.pwmfile_to_motifs",
"gimmemotifs.motif.Motif",
"gimmemotifs.comparison.MotifComparer",
"gimmemotifs.plot.match_plot"
] |
[((590, 605), 'gimmemotifs.comparison.MotifComparer', 'MotifComparer', ([], {}), '()\n', (603, 605), False, 'from gimmemotifs.comparison import MotifComparer\n'), ((1766, 1796), 'gimmemotifs.plot.match_plot', 'match_plot', (['plotdata', 'args.img'], {}), '(plotdata, args.img)\n', (1776, 1796), False, 'from gimmemotifs.plot import match_plot\n'), ((476, 507), 'gimmemotifs.motif.pwmfile_to_motifs', 'pwmfile_to_motifs', (['args.pwmfile'], {}), '(args.pwmfile)\n', (493, 507), False, 'from gimmemotifs.motif import pwmfile_to_motifs, Motif\n'), ((544, 577), 'gimmemotifs.motif.pwmfile_to_motifs', 'pwmfile_to_motifs', (['args.dbpwmfile'], {}), '(args.dbpwmfile)\n', (561, 577), False, 'from gimmemotifs.motif import pwmfile_to_motifs, Motif\n'), ((1451, 1503), 'gimmemotifs.motif.Motif', 'Motif', (['([[0.25, 0.25, 0.25, 0.25]] * -pos + motif.pwm)'], {}), '([[0.25, 0.25, 0.25, 0.25]] * -pos + motif.pwm)\n', (1456, 1503), False, 'from gimmemotifs.motif import pwmfile_to_motifs, Motif\n'), ((1617, 1670), 'gimmemotifs.motif.Motif', 'Motif', (['([[0.25, 0.25, 0.25, 0.25]] * pos + dbmotif.pwm)'], {}), '([[0.25, 0.25, 0.25, 0.25]] * pos + dbmotif.pwm)\n', (1622, 1670), False, 'from gimmemotifs.motif import pwmfile_to_motifs, Motif\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
def benchmark_neutralize(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting least square fitting benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
calc_res = neutralize(x, y)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
benchmark_model = LinearRegression(fit_intercept=False)
benchmark_model.fit(x, y)
exp_res = y - x @ benchmark_model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
np.testing.assert_array_almost_equal(calc_res, exp_res)
def benchmark_neutralize_with_groups(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting least square fitting with group benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = neutralize(x, y, groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
model = LinearRegression(fit_intercept=False)
for _ in range(n_loops):
for i in range(n_groups):
curr_x = x[groups == i]
curr_y = y[groups == i]
model.fit(curr_x, curr_y)
_ = curr_y - curr_x @ model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
|
[
"numpy.testing.assert_array_almost_equal",
"alphamind.data.neutralize.neutralize",
"datetime.datetime.now",
"numpy.random.randint",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression"
] |
[((591, 620), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(5)'], {}), '(n_samples, 5)\n', (606, 620), True, 'import numpy as np\n'), ((630, 668), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (645, 668), True, 'import numpy as np\n'), ((684, 701), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (699, 701), True, 'import datetime as dt\n'), ((907, 924), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (922, 924), True, 'import datetime as dt\n'), ((1244, 1299), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['calc_res', 'exp_res'], {}), '(calc_res, exp_res)\n', (1280, 1299), True, 'import numpy as np\n'), ((1968, 1997), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(5)'], {}), '(n_samples, 5)\n', (1983, 1997), True, 'import numpy as np\n'), ((2007, 2045), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (2022, 2045), True, 'import numpy as np\n'), ((2060, 2103), 'numpy.random.randint', 'np.random.randint', (['n_groups'], {'size': 'n_samples'}), '(n_groups, size=n_samples)\n', (2077, 2103), True, 'import numpy as np\n'), ((2119, 2136), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2134, 2136), True, 'import datetime as dt\n'), ((2343, 2360), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2358, 2360), True, 'import datetime as dt\n'), ((2376, 2413), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (2392, 2413), False, 'from sklearn.linear_model import LinearRegression\n'), ((752, 768), 'alphamind.data.neutralize.neutralize', 'neutralize', (['x', 'y'], {}), '(x, y)\n', (762, 768), False, 'from alphamind.data.neutralize import neutralize\n'), ((792, 809), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (807, 809), True, 'import datetime as dt\n'), ((982, 1019), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (998, 1019), False, 'from sklearn.linear_model import LinearRegression\n'), ((1134, 1151), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1149, 1151), True, 'import datetime as dt\n'), ((2180, 2204), 'alphamind.data.neutralize.neutralize', 'neutralize', (['x', 'y', 'groups'], {}), '(x, y, groups)\n', (2190, 2204), False, 'from alphamind.data.neutralize import neutralize\n'), ((2228, 2245), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2243, 2245), True, 'import datetime as dt\n'), ((2669, 2686), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2684, 2686), True, 'import datetime as dt\n')]
|
from __future__ import annotations
from math import log
from typing import List, Type, Union
from imm import MuteState, Sequence, lprob_add, lprob_zero
from nmm import (
AminoAlphabet,
AminoLprob,
BaseLprob,
CodonLprob,
CodonMarg,
DNAAlphabet,
FrameState,
RNAAlphabet,
codon_iter,
)
from .codon_table import CodonTable
from .hmmer_model import HMMERModel
from .model import AltModel, EntryDistr, Node, NullModel, SpecialNode, Transitions
from .profile import Profile, ProfileID
__all__ = ["ProteinProfile", "create_profile"]
class ProteinProfile(Profile):
@classmethod
def create(
cls: Type[ProteinProfile],
profid: ProfileID,
factory: ProteinStateFactory,
null_aminot: AminoLprob,
core_nodes: List[Node],
core_trans: List[Transitions],
entry_distr: EntryDistr,
) -> ProteinProfile:
base_alphabet = factory.genetic_code.base_alphabet
R = factory.create(b"R", null_aminot)
null_model = NullModel.create(R)
special_node = SpecialNode(
S=MuteState.create(b"S", base_alphabet),
N=factory.create(b"N", null_aminot),
B=MuteState.create(b"B", base_alphabet),
E=MuteState.create(b"E", base_alphabet),
J=factory.create(b"J", null_aminot),
C=factory.create(b"C", null_aminot),
T=MuteState.create(b"T", base_alphabet),
)
alt_model = AltModel.create(
special_node,
core_nodes,
core_trans,
entry_distr,
)
# alt_model.set_fragment_length(self._special_transitions)
return cls(profid, base_alphabet, null_model, alt_model, False)
# @property
# def epsilon(self) -> float:
# nodes = self._alt_model.core_nodes()
# return nodes[0].M.epsilon
# @classmethod
# def create_from_binary(
# cls: Type[ProteinProfile],
# profid: ProfileID,
# null_model: nmm.Model,
# alt_model: nmm.Model,
# ):
# special_node = wrap.special_node(alt_model.hmm)
# core_nodes = wrap.core_nodes(alt_model.hmm)
# alt = AltModel.create_from_hmm(
# special_node, core_nodes, alt_model.hmm, alt_model.dp
# )
# null = NullModel.create_from_hmm(null_model.hmm)
# return cls(profid, alt_model.hmm.alphabet, null, alt, False)
# @property
# def window_length(self) -> int:
# return super().window_length
# @window_length.setter
# def window_length(self, length: int) -> None:
# if length < -1:
# raise ValueError("Length must be greater than or equal to -1.")
# if length == -1:
# length = 2 * 3 * self._alt_model.core_length
# self._window_length = length
def create_sequence(self, sequence: bytes) -> Sequence:
return Sequence.create(sequence, self.alphabet)
@property
def null_model(self) -> NullModel:
return self._null_model
@property
def alt_model(self) -> AltModel:
return self._alt_model
# def search(self, sequence: SequenceABC) -> SearchResults:
# self._set_target_length_model(len(sequence))
# alt_results = self._alt_model.viterbi(sequence, self.window_length)
# def create_fragment(
# seq: SequenceABC, path: Path, homologous: bool
# ):
# return ProteinFragment(seq, path, homologous)
# search_results = SearchResults(sequence, create_fragment)
# for alt_result in alt_results:
# subseq = alt_result.sequence
# # TODO: temporary fix for reading from binary file
# # and consequently alt and null model having different alphabets
# s = Sequence.create(bytes(subseq), self._null_model.hmm.alphabet)
# viterbi_score0 = self._null_model.loglikelihood(s)
# if len(alt_result.path) == 0:
# viterbi_score1 = lprob_invalid()
# else:
# viterbi_score1 = self._alt_model.loglikelihood(alt_result.sequence,
# alt_result.path)
# score = viterbi_score1 - viterbi_score0
# window = Interval(subseq.start, subseq.start + len(subseq))
# search_results.append(
# score, window, alt_result.path, viterbi_score1, viterbi_score0
# )
# return search_results
# def create_profile(
# hmm: HMMERModel,
# base_abc: Union[RNAAlphabet, DNAAlphabet],
# window_length: int = 0,
# epsilon: float = 0.1,
# ) -> ProteinProfile:
# amino_abc = hmm.alphabet
# assert isinstance(amino_abc, AminoAlphabet)
# lprobs = lprob_normalize(hmm.insert_lprobs(0))
# null_aminot = AminoLprob.create(amino_abc, lprobs)
# factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
# nodes: List[Node] = []
# for m in range(1, hmm.model_length + 1):
# lprobs = lprob_normalize(hmm.match_lprobs(m))
# M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# lprobs = lprob_normalize(hmm.insert_lprobs(m))
# I = factory.create(f"I{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# D = MuteState.create(f"D{m}".encode(), base_abc)
# nodes.append(Node(M, I, D))
# trans: List[Transitions] = []
# for t in hmm.transitions:
# t.normalize()
# trans.append(t)
# profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
# prof = ProteinProfile.create(
# profid, factory, null_aminot, nodes, trans, EntryDistr.UNIFORM
# )
# prof.window_length = window_length
# return prof
def create_profile(
hmm: HMMERModel,
base_abc: Union[RNAAlphabet, DNAAlphabet],
window_length: int = 0,
epsilon: float = 0.1,
) -> ProteinProfile:
amino_abc = hmm.alphabet
assert isinstance(amino_abc, AminoAlphabet)
null_lprobs = hmm.null_lprobs
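    # Insert states emit with zero log-odds, i.e. exactly the background (null) distribution.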
null_log_odds = [0.0] * len(null_lprobs)
null_aminot = AminoLprob.create(amino_abc, null_lprobs)
factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
nodes: List[Node] = []
for m in range(1, hmm.model_length + 1):
lodds = [v0 - v1 for v0, v1 in zip(hmm.match_lprobs(m), null_lprobs)]
M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lodds))
I = factory.create(
f"I{m}".encode(), AminoLprob.create(amino_abc, null_log_odds)
)
D = MuteState.create(f"D{m}".encode(), base_abc)
nodes.append(Node(M, I, D))
trans = hmm.transitions
profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
entry_distr = EntryDistr.OCCUPANCY
prof = ProteinProfile.create(
profid, factory, null_aminot, nodes, trans, entry_distr
)
prof.window_length = window_length
return prof
class ProteinStateFactory:
def __init__(
self,
gcode: CodonTable,
epsilon: float,
):
self._gcode = gcode
self._epsilon = epsilon
def create(self, name: bytes, aminot: AminoLprob) -> FrameState:
codonp = _create_codon_prob(aminot, self._gcode)
baset = _create_base_table(codonp)
codonm = CodonMarg.create(codonp)
return FrameState.create(name, baset, codonm, self._epsilon)
@property
def genetic_code(self) -> CodonTable:
return self._gcode
@property
def epsilon(self) -> float:
return self._epsilon
def _create_base_table(codonp: CodonLprob):
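    # Collapse codon log-probabilities into per-base log-probabilities: each codon contributes p(codon)/3 to the base at each of its three positions.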
base_abc = codonp.alphabet
base_lprob = {base: lprob_zero() for base in base_abc.symbols}
norm = log(3)
for codon in codon_iter(base_abc):
lprob = codonp.get_lprob(codon)
triplet = codon.symbols
base_lprob[triplet[0]] = lprob_add(base_lprob[triplet[0]], lprob - norm)
base_lprob[triplet[1]] = lprob_add(base_lprob[triplet[1]], lprob - norm)
base_lprob[triplet[2]] = lprob_add(base_lprob[triplet[2]], lprob - norm)
assert len(base_lprob) == 4
bases = base_abc.symbols
assert len(bases) == 4
return BaseLprob.create(
base_abc,
(
base_lprob[bases[0]],
base_lprob[bases[1]],
base_lprob[bases[2]],
base_lprob[bases[3]],
),
)
def _create_codon_prob(aminot: AminoLprob, gencode: CodonTable) -> CodonLprob:
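    # Spread each amino acid's probability uniformly over its synonymous codons, then renormalize over all codons.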
codonp = CodonLprob.create(gencode.base_alphabet)
codon_lprobs = []
lprob_norm = lprob_zero()
for i in range(len(aminot.alphabet.symbols)):
aa = aminot.alphabet.symbols[i : i + 1]
lprob = aminot.lprob(aa)
codons = gencode.codons(aa)
if len(codons) == 0:
continue
norm = log(len(codons))
for codon in codons:
codon_lprobs.append((codon, lprob - norm))
lprob_norm = lprob_add(lprob_norm, codon_lprobs[-1][1])
for codon, lprob in codon_lprobs:
codonp.set_lprob(codon, lprob - lprob_norm)
return codonp
|
[
"nmm.CodonLprob.create",
"nmm.BaseLprob.create",
"imm.MuteState.create",
"imm.Sequence.create",
"math.log",
"nmm.AminoLprob.create",
"nmm.codon_iter",
"nmm.FrameState.create",
"nmm.CodonMarg.create",
"imm.lprob_add",
"imm.lprob_zero"
] |
[((6106, 6147), 'nmm.AminoLprob.create', 'AminoLprob.create', (['amino_abc', 'null_lprobs'], {}), '(amino_abc, null_lprobs)\n', (6123, 6147), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((7728, 7734), 'math.log', 'log', (['(3)'], {}), '(3)\n', (7731, 7734), False, 'from math import log\n'), ((7752, 7772), 'nmm.codon_iter', 'codon_iter', (['base_abc'], {}), '(base_abc)\n', (7762, 7772), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((8190, 8310), 'nmm.BaseLprob.create', 'BaseLprob.create', (['base_abc', '(base_lprob[bases[0]], base_lprob[bases[1]], base_lprob[bases[2]],\n base_lprob[bases[3]])'], {}), '(base_abc, (base_lprob[bases[0]], base_lprob[bases[1]],\n base_lprob[bases[2]], base_lprob[bases[3]]))\n', (8206, 8310), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((8483, 8523), 'nmm.CodonLprob.create', 'CodonLprob.create', (['gencode.base_alphabet'], {}), '(gencode.base_alphabet)\n', (8500, 8523), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((8564, 8576), 'imm.lprob_zero', 'lprob_zero', ([], {}), '()\n', (8574, 8576), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((2902, 2942), 'imm.Sequence.create', 'Sequence.create', (['sequence', 'self.alphabet'], {}), '(sequence, self.alphabet)\n', (2917, 2942), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((7319, 7343), 'nmm.CodonMarg.create', 'CodonMarg.create', (['codonp'], {}), '(codonp)\n', (7335, 7343), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((7359, 7412), 'nmm.FrameState.create', 'FrameState.create', (['name', 'baset', 'codonm', 'self._epsilon'], {}), '(name, baset, codonm, self._epsilon)\n', (7376, 7412), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((7674, 7686), 'imm.lprob_zero', 'lprob_zero', ([], {}), '()\n', (7684, 7686), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((7880, 7927), 'imm.lprob_add', 'lprob_add', (['base_lprob[triplet[0]]', '(lprob - norm)'], {}), '(base_lprob[triplet[0]], lprob - norm)\n', (7889, 7927), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((7961, 8008), 'imm.lprob_add', 'lprob_add', (['base_lprob[triplet[1]]', '(lprob - norm)'], {}), '(base_lprob[triplet[1]], lprob - norm)\n', (7970, 8008), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((8042, 8089), 'imm.lprob_add', 'lprob_add', (['base_lprob[triplet[2]]', '(lprob - norm)'], {}), '(base_lprob[triplet[2]], lprob - norm)\n', (8051, 8089), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((6420, 6455), 'nmm.AminoLprob.create', 'AminoLprob.create', (['amino_abc', 'lodds'], {}), '(amino_abc, lodds)\n', (6437, 6455), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((6516, 6559), 'nmm.AminoLprob.create', 'AminoLprob.create', (['amino_abc', 'null_log_odds'], {}), '(amino_abc, null_log_odds)\n', (6533, 6559), False, 'from nmm import AminoAlphabet, AminoLprob, BaseLprob, CodonLprob, CodonMarg, DNAAlphabet, FrameState, RNAAlphabet, codon_iter\n'), ((8937, 8979), 'imm.lprob_add', 'lprob_add', (['lprob_norm', 'codon_lprobs[-1][1]'], {}), '(lprob_norm, codon_lprobs[-1][1])\n', (8946, 8979), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((1092, 1129), 'imm.MuteState.create', 'MuteState.create', (["b'S'", 'base_alphabet'], {}), "(b'S', base_alphabet)\n", (1108, 1129), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((1194, 1231), 'imm.MuteState.create', 'MuteState.create', (["b'B'", 'base_alphabet'], {}), "(b'B', base_alphabet)\n", (1210, 1231), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((1247, 1284), 'imm.MuteState.create', 'MuteState.create', (["b'E'", 'base_alphabet'], {}), "(b'E', base_alphabet)\n", (1263, 1284), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n'), ((1398, 1435), 'imm.MuteState.create', 'MuteState.create', (["b'T'", 'base_alphabet'], {}), "(b'T', base_alphabet)\n", (1414, 1435), False, 'from imm import MuteState, Sequence, lprob_add, lprob_zero\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
from ptranking.ltr_global import global_gpu as gpu
def get_one_hot_reprs(batch_stds):
""" Get one-hot representation of batch ground-truth labels """
batch_size = batch_stds.size(0)
hist_size = batch_stds.size(1)
int_batch_stds = batch_stds.type(torch.cuda.LongTensor) if gpu else batch_stds.type(torch.LongTensor)
hot_batch_stds = torch.cuda.FloatTensor(batch_size, hist_size, 3) if gpu else torch.FloatTensor(batch_size, hist_size, 3)
hot_batch_stds.zero_()
hot_batch_stds.scatter_(2, torch.unsqueeze(int_batch_stds, 2), 1)
return hot_batch_stds
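# Added usage sketch (not part of the original module): a minimal shape check,
# assuming a CPU run (gpu == False) and ground-truth labels in {0, 1, 2}.
if __name__ == '__main__':
    example_stds = torch.tensor([[0.0, 2.0, 1.0], [1.0, 0.0, 2.0]])  # [batch_size, hist_size]
    one_hot = get_one_hot_reprs(example_stds)  # -> [batch_size, hist_size, 3]
    print(one_hot.shape)  # expected: torch.Size([2, 3, 3])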
|
[
"torch.unsqueeze",
"torch.cuda.FloatTensor",
"torch.FloatTensor"
] |
[((435, 483), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['batch_size', 'hist_size', '(3)'], {}), '(batch_size, hist_size, 3)\n', (457, 483), False, 'import torch\n'), ((496, 539), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'hist_size', '(3)'], {}), '(batch_size, hist_size, 3)\n', (513, 539), False, 'import torch\n'), ((598, 632), 'torch.unsqueeze', 'torch.unsqueeze', (['int_batch_stds', '(2)'], {}), '(int_batch_stds, 2)\n', (613, 632), False, 'import torch\n')]
|
from ..utils import run
import logging
logger = logging.getLogger(__name__)
def process_one_package(path, package, python_version="3"):
"""Get details about one precise python package in the given image.
    :param path: path where the docker image filesystem is expanded.
:type path: string
:param package: name of the python package to get info from.
:type package: string
    :param python_version: version of Python to use; can be "2" or "3". Defaults to "3".
:type python_version: string
:return: list containing package name, version and size
:rtype: list[string, string, int]
"""
command = f"sudo chroot {path} pip{python_version} show {package}"
info = get_ipython().getoutput(command)
for line in info:
if "Name" in line:
name = line.split(" ").pop()
if "Version" in line:
version = line.split(" ").pop()
if "Location" in line:
location = line.split(" ").pop()
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name}").pop()
# If the folder does not exist, try lowercase
if "cannot access" in result:
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name.lower()}").pop()
    # If one of the folders exists, take its size from the du output
if "cannot access" not in result:
size = int(result.split('\t').pop(0))
# List the files by hand
else:
command = f"sudo chroot {path} pip{python_version} show {package} -f"
info = get_ipython().getoutput(command)
flag = False
size = 0
for line in info:
if flag:
command = f"du {path}{location}/{line.strip()}"
size += int(get_ipython().getoutput(command).pop().split('\t').pop(0))
if 'Files' in line:
flag = True
return [name, version, size]
def get_python_packages_info(path, python_version="3"):
"""Get details about all python packages in an image filesystem.
    :param path: path where the docker image filesystem is expanded.
:type path: string
    :param python_version: version of Python to use; can be "2" or "3". Defaults to "3".
:type python_version: string
:return: list containing lists of each package's name, version and size
:rtype: list[list[string, string, int]]
"""
command = f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"
packages = [package.split('==')
for package in get_ipython().getoutput(command)]
package_list = []
for package in packages:
try:
package_list.append(process_one_package(path, package[0]))
except Exception as e:
            logger.error("Error processing python package %s: %s", package[0], e)
pass
return package_list
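# Added usage sketch (not part of the original module). Both helpers call
# get_ipython(), so they only work inside an IPython/Jupyter session; the
# mount point below is a hypothetical path to an expanded image filesystem.
#   packages = get_python_packages_info("/tmp/image_rootfs", python_version="3")
#   for name, version, size in packages:
#       print(name, version, size)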
|
[
"logging.getLogger"
] |
[((49, 76), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (66, 76), False, 'import logging\n')]
|
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, JsonResponse
from django.views.generic.base import View
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from .models import (
TestCategory,
Test,
Question,
PossibleAnswer,
AnswersCounter
)
from .serializers import (
TestCategorySerializer,
TestSerializer,
QuestionSerializer,
PossibleAnswerSerializer
)
from backend.courses.models import Task, RealizationTask, Course
from backend.courses.api_views import CompletedTasks
from backend.utils.api import BlankGetAPIView
class AllCategories(BlankGetAPIView):
"""
    Output all categories,
    no parameters
"""
permission_classes = [permissions.IsAuthenticated]
model = TestCategory
serializer = TestCategorySerializer
class TestsInCategory(BlankGetAPIView):
"""
    Output the tests of a single category,
    parameter: pk, value: id of the category whose tests are needed
"""
permission_classes = [permissions.IsAuthenticated]
model = Test
serializer = TestSerializer
filter_name = 'category_id'
class QuestionsInTest(LoginRequiredMixin, View):
    """Output the questions of a single test,
    parameter: pk, value: id of the test whose questions are needed
"""
def get(self, request):
"""Get"""
quest = Question.objects.filter(test_id=request.GET.get("pk", None)).order_by("-id")
counter = CompleteQuestion().get_counter(request.user, request.GET.get("pk", None))
serializer = QuestionSerializer(quest, many=True)
return JsonResponse(serializer.data, safe=False)
# class QuestionsInTest(BlankGetAPIView):
# """
#     Output the questions of a single test,
#     parameter: pk, value: id of the test whose questions are needed
# """
# permission_classes = [permissions.IsAuthenticated]
# model = Question
# serializer = QuestionSerializer
# filter_name = 'test_id'
# order_params = 'id'
class AnswersInQuestion(BlankGetAPIView):
"""
    Output the answer options for a question,
    parameter: pk, value: id of the question whose answers are needed
"""
permission_classes = [permissions.IsAuthenticated]
model = PossibleAnswer
serializer = PossibleAnswerSerializer
filter_name = 'question_id'
order_params = '-id'
class CompleteQuestion(LoginRequiredMixin, View):
    """Output test results and handle taking the test"""
def post(self, request):
"""Post"""
pks = request.POST.getlist('pks[]', None)
if not pks:
return JsonResponse({'task': {
'exists': None,
'success': None,
'next': None
},
"message": 'Нет ответов!'})
variants = PossibleAnswer.objects.filter(id__in=pks)
        # Whether an attached task exists
task_exists = variants.first().question.test.tasks.exists()
        # Number of correct answer options
right_count = variants.filter(is_right=True).count()
        # Total number of questions in the test
total_questions = variants.first().question.test.questions.count()
# total_variants = variants.filter(is_right=True)
        # Check that the number of correct answers matches
        # the total number of questions
if not variants.filter(is_right=False).exists() and right_count >= total_questions:
success = True
mess = ""
elif variants.filter(is_right=False).exists() and right_count >= total_questions:
success = False
mess = "Тест не пройден"
else:
success = False
mess = ""
course_pk = request.POST.get('course_pk', None)
link = None
if success:
            # Get the current user's RealizationTask and mark it as completed
realization = self.get_realization(request.user, variants.first())
if realization is not None:
realization.success = True
realization.save()
else:
link = Course.objects.get(id=course_pk).buy_link #, test_in_course=variants.first().question.test
next_task = None
if course_pk:
next_task = CompletedTasks().get_next_task(request, course_pk=course_pk)
return JsonResponse({
'task': {
'exists': task_exists,
'success': success if task_exists else None,
'next': next_task.id if next_task else None
},
'success': success,
'total': total_questions,
'right': right_count,
'link': link,
'message': mess,
})
# def post(self, request):
    #     """Take the test"""
    #     pk = request.data.get('pk') # id of the answer option
#
# try:
# variant = PossibleAnswer.objects.get(id=pk)
# except ObjectDoesNotExist:
# return Response('Нет такого варианта', status=404)
#
# counter = self.get_counter(request.user, variant.question.test.id)
#
# if variant.is_right:
# counter.counter += 1
# counter.save()
#
# if counter.counter >= counter.questions_count:
# realization = self.get_realization(request.user, variant)
#
# if realization is None:
# counter.delete()
# return Response('Не жульничай', status=400)
#
# realization.success = True
# realization.save()
#
# return Response(status=200)
    def get(self, request):
        """Output the results"""
        pk = request.GET.get('pk') # test id
counter = self.get_counter(request.user, pk)
return JsonResponse({'total': counter.questions_count,
'right': counter.counter})
@staticmethod
    def get_counter(user, pk):
        """Get the counter of correct answers"""
test = Test.objects.get(id=pk)
try:
counter = AnswersCounter.objects.get(user=user, test=test)
except ObjectDoesNotExist:
counter = AnswersCounter.objects.create(user=user, test=test)
# counter = AnswersCounter.objects.get_or_create(user=user, test=test)
return counter
@staticmethod
    def get_realization(user, variant):
        """Get the task completion model"""
try:
realization = RealizationTask.objects.get(
student=user,
task__test__questions__answers__id=variant.id
)
return realization
except ObjectDoesNotExist:
return None
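# Added usage sketch (not part of the original module): each view above expects
# a `pk` GET parameter; the URL paths below are hypothetical, since routing is
# defined elsewhere in the project.
#   GET /tests/categories/           -> AllCategories
#   GET /tests/in-category/?pk=3     -> TestsInCategory
#   GET /tests/questions/?pk=7       -> QuestionsInTest
#   GET /tests/answers/?pk=42        -> AnswersInQuestion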
|
[
"backend.courses.api_views.CompletedTasks",
"backend.courses.models.RealizationTask.objects.get",
"backend.courses.models.Course.objects.get",
"django.http.JsonResponse"
] |
[((1717, 1758), 'django.http.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)'}), '(serializer.data, safe=False)\n', (1729, 1758), False, 'from django.http import HttpResponse, JsonResponse\n'), ((4423, 4671), 'django.http.JsonResponse', 'JsonResponse', (["{'task': {'exists': task_exists, 'success': success if task_exists else\n None, 'next': next_task.id if next_task else None}, 'success': success,\n 'total': total_questions, 'right': right_count, 'link': link, 'message':\n mess}"], {}), "({'task': {'exists': task_exists, 'success': success if\n task_exists else None, 'next': next_task.id if next_task else None},\n 'success': success, 'total': total_questions, 'right': right_count,\n 'link': link, 'message': mess})\n", (4435, 4671), False, 'from django.http import HttpResponse, JsonResponse\n'), ((5872, 5946), 'django.http.JsonResponse', 'JsonResponse', (["{'total': counter.questions_count, 'right': counter.counter}"], {}), "({'total': counter.questions_count, 'right': counter.counter})\n", (5884, 5946), False, 'from django.http import HttpResponse, JsonResponse\n'), ((2683, 2785), 'django.http.JsonResponse', 'JsonResponse', (["{'task': {'exists': None, 'success': None, 'next': None}, 'message':\n 'Нет ответов!'}"], {}), "({'task': {'exists': None, 'success': None, 'next': None},\n 'message': 'Нет ответов!'})\n", (2695, 2785), False, 'from django.http import HttpResponse, JsonResponse\n'), ((6560, 6652), 'backend.courses.models.RealizationTask.objects.get', 'RealizationTask.objects.get', ([], {'student': 'user', 'task__test__questions__answers__id': 'variant.id'}), '(student=user,\n task__test__questions__answers__id=variant.id)\n', (6587, 6652), False, 'from backend.courses.models import Task, RealizationTask, Course\n'), ((4183, 4215), 'backend.courses.models.Course.objects.get', 'Course.objects.get', ([], {'id': 'course_pk'}), '(id=course_pk)\n', (4201, 4215), False, 'from backend.courses.models import Task, RealizationTask, Course\n'), ((4346, 4362), 'backend.courses.api_views.CompletedTasks', 'CompletedTasks', ([], {}), '()\n', (4360, 4362), False, 'from backend.courses.api_views import CompletedTasks\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.placement import resource_providers_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestResourceProvidersClient(base.BaseServiceTest):
FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
FAKE_RESOURCE_PROVIDER = {
'generation': 0,
'name': 'Ceph Storage Pool',
'uuid': FAKE_RESOURCE_PROVIDER_UUID,
'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
}
FAKE_RESOURCE_PROVIDERS = {
'resource_providers': [FAKE_RESOURCE_PROVIDER]
}
FAKE_RESOURCE_PROVIDER_INVENTORIES = {
'inventories': {
'DISK_GB': {
'allocation_ratio': 1.0,
'max_unit': 35,
'min_unit': 1,
'reserved': 0,
'step_size': 1,
'total': 35
}
},
'resource_provider_generation': 7
}
FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
FAKE_RESOURCE_PROVIDER_AGGREGATES = {
'aggregates': [FAKE_AGGREGATE_UUID]
}
FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE = {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"min_unit": 1,
"reserved": 0,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"max_unit": 2147483647,
"min_unit": 1,
"reserved": 2,
"step_size": 1,
"total": 64
}
},
"resource_provider_generation": 2
}
FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST = {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"reserved": 2,
"total": 64
}
},
"resource_provider_generation": 1
}
def setUp(self):
super(TestResourceProvidersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = resource_providers_client.ResourceProvidersClient(
fake_auth, 'placement', 'regionOne')
def _test_list_resource_providers(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_providers,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDERS,
to_utf=bytes_body,
status=200
)
def test_list_resource_providers_with_bytes_body(self):
self._test_list_resource_providers()
def test_list_resource_providers_with_str_body(self):
self._test_list_resource_providers(bytes_body=True)
def _test_show_resource_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.show_resource_provider,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_show_resource_provider_with_str_body(self):
self._test_show_resource_provider()
def test_show_resource_provider_with_bytes_body(self):
self._test_show_resource_provider(bytes_body=True)
def _test_list_resource_provider_inventories(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_provider_inventories,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_list_resource_provider_inventories_with_str_body(self):
self._test_list_resource_provider_inventories()
def test_list_resource_provider_inventories_with_bytes_body(self):
self._test_list_resource_provider_inventories(bytes_body=True)
def _test_update_resource_providers_inventories(self, bytes_body=False):
self.check_service_client_function(
self.client.update_resource_providers_inventories,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
**self.FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST
)
def test_update_resource_providers_inventories_with_str_body(self):
self._test_update_resource_providers_inventories()
def test_update_resource_providers_inventories_with_bytes_body(self):
self._test_update_resource_providers_inventories(bytes_body=True)
def test_delete_resource_providers_inventories(self):
self.check_service_client_function(
self.client.delete_resource_providers_inventories,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
)
def _test_list_resource_provider_aggregates(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_provider_aggregates,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_list_resource_provider_aggregates_with_str_body(self):
self._test_list_resource_provider_aggregates()
def test_list_resource_provider_aggregates_with_bytes_body(self):
self._test_list_resource_provider_aggregates(bytes_body=True)
|
[
"tempest.lib.services.placement.resource_providers_client.ResourceProvidersClient",
"tempest.tests.lib.fake_auth_provider.FakeAuthProvider"
] |
[((2962, 2999), 'tempest.tests.lib.fake_auth_provider.FakeAuthProvider', 'fake_auth_provider.FakeAuthProvider', ([], {}), '()\n', (2997, 2999), False, 'from tempest.tests.lib import fake_auth_provider\n'), ((3022, 3112), 'tempest.lib.services.placement.resource_providers_client.ResourceProvidersClient', 'resource_providers_client.ResourceProvidersClient', (['fake_auth', '"""placement"""', '"""regionOne"""'], {}), "(fake_auth, 'placement',\n 'regionOne')\n", (3071, 3112), False, 'from tempest.lib.services.placement import resource_providers_client\n')]
|
from problems import utils, mymath
@utils.memoize
def sum_proper_factors(n):
return sum(mymath.proper_factorization(n))
def solve():
upper_bound = 1000000
chains = dict()
for start_number in range(1, upper_bound):
chain = [start_number]
current_number = sum_proper_factors(start_number)
while current_number != start_number:
if current_number > upper_bound or current_number == 0 or len(chain) > 100:
break
elif current_number in chains:
chain += chains[current_number]
break
else:
chain.append(current_number)
current_number = sum_proper_factors(current_number)
if current_number == start_number:
chains[start_number] = chain
chain_lengths = {i: len(chains[i]) for i in chains}
max_key = mymath.key_of_max_value(chain_lengths)
return min(chains[max_key])
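# Added worked example (not part of the original solution), assuming
# mymath.proper_factorization returns the proper divisors of n: the proper
# divisors of 220 sum to 284 and those of 284 sum back to 220, so
# sum_proper_factors(220) == 284 and sum_proper_factors(284) == 220, giving
# the amicable chain [220, 284] of length 2 that solve() would record.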
if __name__ == '__main__':
print(solve())
|
[
"problems.mymath.proper_factorization",
"problems.mymath.key_of_max_value"
] |
[((881, 919), 'problems.mymath.key_of_max_value', 'mymath.key_of_max_value', (['chain_lengths'], {}), '(chain_lengths)\n', (904, 919), False, 'from problems import utils, mymath\n'), ((94, 124), 'problems.mymath.proper_factorization', 'mymath.proper_factorization', (['n'], {}), '(n)\n', (121, 124), False, 'from problems import utils, mymath\n')]
|
import logging
from django.utils import timezone
from typing import Union
from .exceptions import InvalidTrustchain, TrustchainMissingMetadata
from .models import FetchedEntityStatement, TrustChain
from .statements import EntityConfiguration, get_entity_configurations
from .settings import HTTPC_PARAMS
from .trust_chain import TrustChainBuilder
from .utils import datetime_from_timestamp
logger = logging.getLogger(__name__)
def trust_chain_builder(
subject: str,
trust_anchor: EntityConfiguration,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = []
) -> Union[TrustChainBuilder, bool]:
"""
Trust Chain builder
"""
tc = TrustChainBuilder(
subject,
trust_anchor=trust_anchor,
required_trust_marks=required_trust_marks,
httpc_params=httpc_params
)
tc.start()
if not tc.is_valid:
logger.error(
"The tree of trust cannot be validated for "
f"{tc.subject}: {tc.tree_of_trust}"
)
return False
else:
return tc
def dumps_statements_from_trust_chain_to_db(trust_chain: TrustChainBuilder) -> list:
entity_statements = []
for stat in trust_chain.trust_path:
data = dict(
exp=datetime_from_timestamp(stat.payload["exp"]),
iat=datetime_from_timestamp(stat.payload["iat"]),
statement=stat.payload,
jwt=stat.jwt,
)
fes = FetchedEntityStatement.objects.filter(sub=stat.sub, iss=stat.iss)
if fes:
fes.update(**data)
else:
fes = FetchedEntityStatement.objects.create(
sub=stat.sub, iss=stat.iss, **data
)
entity_statements.append(fes)
if stat.verified_descendant_statements:
for desc_stat_sub in stat.verified_descendant_statements:
payload = stat.verified_descendant_statements[desc_stat_sub]
jwt = stat.verified_descendant_statements_as_jwt[desc_stat_sub]
_data = dict(
exp=datetime_from_timestamp(payload["exp"]),
iat=datetime_from_timestamp(payload["iat"]),
statement=payload,
jwt=jwt,
)
desc_fes = FetchedEntityStatement.objects.filter(
sub=payload["sub"], iss=payload["iss"]
)
if desc_fes:
desc_fes.update(**_data)
else:
desc_fes = FetchedEntityStatement.objects.create(
sub=payload["sub"], iss=payload["iss"], **_data
)
entity_statements.append(desc_fes)
return entity_statements
def get_or_create_trust_chain(
subject: str,
trust_anchor: str,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = [],
force: bool = False,
) -> Union[TrustChain, None]:
"""
    Returns a TrustChain model object if one is available:
    if available, it is returned;
    if not available, a new one is created;
    if available but expired, the expired one is returned.
    If the force flag is set to True, the trust chain is renewed, updated and
    the updated one is returned.
"""
fetched_trust_anchor = FetchedEntityStatement.objects.filter(
sub=trust_anchor, iss=trust_anchor
)
if not fetched_trust_anchor or fetched_trust_anchor.first().is_expired or force:
jwts = get_entity_configurations([trust_anchor], httpc_params=httpc_params)
ta_conf = EntityConfiguration(jwts[0], httpc_params=httpc_params)
data = dict(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
if not fetched_trust_anchor:
            # trust in the anchor should be absolute!
# ta_conf.validate_by_itself()
fetched_trust_anchor = FetchedEntityStatement.objects.create(
sub=ta_conf.sub, iss=ta_conf.iss, **data
)
else:
fetched_trust_anchor.update(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
fetched_trust_anchor = fetched_trust_anchor.first()
else:
fetched_trust_anchor = fetched_trust_anchor.first()
ta_conf = fetched_trust_anchor.get_entity_configuration_as_obj()
tc = TrustChain.objects.filter(sub=subject, trust_anchor__sub=trust_anchor).first()
if tc and not tc.is_active:
        # if manually disabled by staff
return None
elif force or not tc or tc.is_expired:
trust_chain = trust_chain_builder(
subject=subject,
trust_anchor=ta_conf,
required_trust_marks=required_trust_marks
)
if not trust_chain:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not found"
)
elif not trust_chain.is_valid:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not valid"
)
elif not trust_chain.final_metadata:
raise TrustchainMissingMetadata(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} doesn't have any metadata"
)
dumps_statements_from_trust_chain_to_db(trust_chain)
tc = TrustChain.objects.filter(
sub=subject, trust_anchor__sub=trust_anchor
)
data = dict(
exp=trust_chain.exp_datetime,
processing_start = timezone.localtime(),
chain=trust_chain.serialize(),
metadata=trust_chain.final_metadata,
parties_involved=[i.sub for i in trust_chain.trust_path],
status="valid",
trust_marks=[
{"id": i.id, "trust_mark": i.jwt}
for i in trust_chain.verified_trust_marks
],
is_active=True,
)
if tc:
tc.update(**data)
tc = tc.first()
else:
tc = TrustChain.objects.create(
sub=subject,
trust_anchor=fetched_trust_anchor,
**data,
)
return tc
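# Added usage sketch (not part of the original module), assuming a Django
# context with these models migrated and network access to both entities;
# the URLs are hypothetical.
#   chain = get_or_create_trust_chain(
#       subject="https://rp.example.org",
#       trust_anchor="https://ta.example.org",
#   )
#   if chain and chain.is_active:
#       metadata = chain.metadata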
|
[
"logging.getLogger",
"django.utils.timezone.localtime"
] |
[((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((5925, 5945), 'django.utils.timezone.localtime', 'timezone.localtime', ([], {}), '()\n', (5943, 5945), False, 'from django.utils import timezone\n')]
|
##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import tests
from common import TestCommon
from results import PassFailMultiResult
class CompilerRTBuiltinsAbstract(TestCommon):
def get_finish_string(self):
return "usleeptest_done"
def process_data(self, testdir, rawiter):
# the test passed if no error occurred
errors = []
for line in rawiter:
if "error in" in line:
errors.append(line)
if line.startswith("Assertion failed on core"):
errors.append(line)
return PassFailMultiResult(self.name, errors)
# lists of tests to run for compiler-rt
vector_fp_tests = [
"compiler-rt/test/builtins/Unit/adddf3vfp_test",
"compiler-rt/test/builtins/Unit/addsf3vfp_test",
"compiler-rt/test/builtins/Unit/divdf3vfp_test",
"compiler-rt/test/builtins/Unit/divsf3vfp_test",
"compiler-rt/test/builtins/Unit/eqdf2vfp_test",
"compiler-rt/test/builtins/Unit/eqsf2vfp_test",
"compiler-rt/test/builtins/Unit/extebdsfdf2vfp_test",
"compiler-rt/test/builtins/Unit/fixdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixsfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunsdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunssfsivfp_test",
"compiler-rt/test/builtins/Unit/floatsidfvfp_test",
"compiler-rt/test/builtins/Unit/floatsisfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssidfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssisfvfp_test",
"compiler-rt/test/builtins/Unit/gedf2vfp_test",
"compiler-rt/test/builtins/Unit/gesf2vfp_test",
"compiler-rt/test/builtins/Unit/gtdf2vfp_test",
"compiler-rt/test/builtins/Unit/gtsf2vfp_test",
"compiler-rt/test/builtins/Unit/ledf2vfp_test",
"compiler-rt/test/builtins/Unit/lesf2vfp_test",
"compiler-rt/test/builtins/Unit/ltdf2vfp_test",
"compiler-rt/test/builtins/Unit/ltsf2vfp_test",
"compiler-rt/test/builtins/Unit/muldf3vfp_test",
"compiler-rt/test/builtins/Unit/mulsf3vfp_test",
"compiler-rt/test/builtins/Unit/nedf2vfp_test",
"compiler-rt/test/builtins/Unit/negdf2vfp_test",
"compiler-rt/test/builtins/Unit/negsf2vfp_test",
"compiler-rt/test/builtins/Unit/nesf2vfp_test",
"compiler-rt/test/builtins/Unit/subdf3vfp_test",
"compiler-rt/test/builtins/Unit/subsf3vfp_test",
"compiler-rt/test/builtins/Unit/truncdfsf2vfp_test",
"compiler-rt/test/builtins/Unit/unorddf2vfp_test",
"compiler-rt/test/builtins/Unit/unordsf2vfp_test",
]
@tests.add_test
class CompilerRTBuiltinsVfp(CompilerRTBuiltinsAbstract):
name = 'compiler-rt-vfp'
def get_modules(self, build, machine):
modules = super(CompilerRTBuiltinsVfp, self).get_modules(build, machine)
for m in vector_fp_tests:
modules.add_module(m)
modules.add_module("usleeptest", [ "5" ])
return modules
fp_tests = [
"compiler-rt/test/builtins/Unit/absvdi2_test",
"compiler-rt/test/builtins/Unit/absvsi2_test",
"compiler-rt/test/builtins/Unit/absvti2_test",
"compiler-rt/test/builtins/Unit/addtf3_test",
"compiler-rt/test/builtins/Unit/addvdi3_test",
"compiler-rt/test/builtins/Unit/addvsi3_test",
"compiler-rt/test/builtins/Unit/addvti3_test",
"compiler-rt/test/builtins/Unit/ashldi3_test",
"compiler-rt/test/builtins/Unit/ashlti3_test",
"compiler-rt/test/builtins/Unit/ashrdi3_test",
"compiler-rt/test/builtins/Unit/ashrti3_test",
"compiler-rt/test/builtins/Unit/bswapdi2_test",
"compiler-rt/test/builtins/Unit/bswapsi2_test",
# "compiler-rt/test/builtins/Unit/clear_cache_test",
"compiler-rt/test/builtins/Unit/clzdi2_test",
"compiler-rt/test/builtins/Unit/clzsi2_test",
"compiler-rt/test/builtins/Unit/clzti2_test",
"compiler-rt/test/builtins/Unit/cmpdi2_test",
"compiler-rt/test/builtins/Unit/cmpti2_test",
"compiler-rt/test/builtins/Unit/comparedf2_test",
"compiler-rt/test/builtins/Unit/comparesf2_test",
"compiler-rt/test/builtins/Unit/ctzdi2_test",
"compiler-rt/test/builtins/Unit/ctzsi2_test",
"compiler-rt/test/builtins/Unit/ctzti2_test",
"compiler-rt/test/builtins/Unit/divdc3_test",
"compiler-rt/test/builtins/Unit/divdi3_test",
"compiler-rt/test/builtins/Unit/divmodsi4_test",
"compiler-rt/test/builtins/Unit/divsc3_test",
"compiler-rt/test/builtins/Unit/divsi3_test",
# "compiler-rt/test/builtins/Unit/divtc3_test",
"compiler-rt/test/builtins/Unit/divtf3_test",
"compiler-rt/test/builtins/Unit/divti3_test",
"compiler-rt/test/builtins/Unit/divxc3_test",
# "compiler-rt/test/builtins/Unit/enable_execute_stack_test",
"compiler-rt/test/builtins/Unit/eqtf2_test",
"compiler-rt/test/builtins/Unit/extenddftf2_test",
# "compiler-rt/test/builtins/Unit/extendhfsf2_test",
"compiler-rt/test/builtins/Unit/extendsftf2_test",
"compiler-rt/test/builtins/Unit/ffsdi2_test",
"compiler-rt/test/builtins/Unit/ffsti2_test",
"compiler-rt/test/builtins/Unit/fixdfdi_test",
"compiler-rt/test/builtins/Unit/fixdfti_test",
"compiler-rt/test/builtins/Unit/fixsfdi_test",
"compiler-rt/test/builtins/Unit/fixsfti_test",
"compiler-rt/test/builtins/Unit/fixtfdi_test",
"compiler-rt/test/builtins/Unit/fixtfsi_test",
"compiler-rt/test/builtins/Unit/fixtfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunsdfdi_test",
"compiler-rt/test/builtins/Unit/fixunsdfsi_test",
"compiler-rt/test/builtins/Unit/fixunsdfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunssfdi_test",
"compiler-rt/test/builtins/Unit/fixunssfsi_test",
"compiler-rt/test/builtins/Unit/fixunssfti_test",
"compiler-rt/test/builtins/Unit/fixunstfdi_test",
"compiler-rt/test/builtins/Unit/fixunstfsi_test",
"compiler-rt/test/builtins/Unit/fixunstfti_test",
"compiler-rt/test/builtins/Unit/fixunsxfdi_test",
"compiler-rt/test/builtins/Unit/fixunsxfsi_test",
"compiler-rt/test/builtins/Unit/fixunsxfti_test",
"compiler-rt/test/builtins/Unit/fixxfdi_test",
"compiler-rt/test/builtins/Unit/fixxfti_test",
"compiler-rt/test/builtins/Unit/floatdidf_test",
"compiler-rt/test/builtins/Unit/floatdisf_test",
"compiler-rt/test/builtins/Unit/floatditf_test",
"compiler-rt/test/builtins/Unit/floatdixf_test",
"compiler-rt/test/builtins/Unit/floatsitf_test",
"compiler-rt/test/builtins/Unit/floattidf_test",
"compiler-rt/test/builtins/Unit/floattisf_test",
"compiler-rt/test/builtins/Unit/floattixf_test",
"compiler-rt/test/builtins/Unit/floatundidf_test",
"compiler-rt/test/builtins/Unit/floatundisf_test",
"compiler-rt/test/builtins/Unit/floatunditf_test",
"compiler-rt/test/builtins/Unit/floatundixf_test",
"compiler-rt/test/builtins/Unit/floatunsitf_test",
"compiler-rt/test/builtins/Unit/floatuntidf_test",
"compiler-rt/test/builtins/Unit/floatuntisf_test",
"compiler-rt/test/builtins/Unit/floatuntixf_test",
# "compiler-rt/test/builtins/Unit/gcc_personality_test",
"compiler-rt/test/builtins/Unit/getf2_test",
"compiler-rt/test/builtins/Unit/gttf2_test",
"compiler-rt/test/builtins/Unit/letf2_test",
"compiler-rt/test/builtins/Unit/lshrdi3_test",
"compiler-rt/test/builtins/Unit/lshrti3_test",
"compiler-rt/test/builtins/Unit/lttf2_test",
"compiler-rt/test/builtins/Unit/moddi3_test",
"compiler-rt/test/builtins/Unit/modsi3_test",
"compiler-rt/test/builtins/Unit/modti3_test",
"compiler-rt/test/builtins/Unit/muldc3_test",
"compiler-rt/test/builtins/Unit/muldi3_test",
"compiler-rt/test/builtins/Unit/mulodi4_test",
"compiler-rt/test/builtins/Unit/mulosi4_test",
"compiler-rt/test/builtins/Unit/muloti4_test",
"compiler-rt/test/builtins/Unit/mulsc3_test",
"compiler-rt/test/builtins/Unit/multc3_test",
"compiler-rt/test/builtins/Unit/multf3_test",
"compiler-rt/test/builtins/Unit/multi3_test",
"compiler-rt/test/builtins/Unit/mulvdi3_test",
"compiler-rt/test/builtins/Unit/mulvsi3_test",
"compiler-rt/test/builtins/Unit/mulvti3_test",
"compiler-rt/test/builtins/Unit/mulxc3_test",
"compiler-rt/test/builtins/Unit/negdi2_test",
"compiler-rt/test/builtins/Unit/negti2_test",
"compiler-rt/test/builtins/Unit/negvdi2_test",
"compiler-rt/test/builtins/Unit/negvsi2_test",
"compiler-rt/test/builtins/Unit/negvti2_test",
"compiler-rt/test/builtins/Unit/netf2_test",
"compiler-rt/test/builtins/Unit/paritydi2_test",
"compiler-rt/test/builtins/Unit/paritysi2_test",
"compiler-rt/test/builtins/Unit/parityti2_test",
"compiler-rt/test/builtins/Unit/popcountdi2_test",
"compiler-rt/test/builtins/Unit/popcountsi2_test",
"compiler-rt/test/builtins/Unit/popcountti2_test",
"compiler-rt/test/builtins/Unit/powidf2_test",
"compiler-rt/test/builtins/Unit/powisf2_test",
"compiler-rt/test/builtins/Unit/powitf2_test",
"compiler-rt/test/builtins/Unit/powixf2_test",
"compiler-rt/test/builtins/Unit/subtf3_test",
"compiler-rt/test/builtins/Unit/subvdi3_test",
"compiler-rt/test/builtins/Unit/subvsi3_test",
"compiler-rt/test/builtins/Unit/subvti3_test",
# "compiler-rt/test/builtins/Unit/trampoline_setup_test",
# "compiler-rt/test/builtins/Unit/truncdfhf2_test",
"compiler-rt/test/builtins/Unit/truncdfsf2_test",
# "compiler-rt/test/builtins/Unit/truncsfhf2_test",
"compiler-rt/test/builtins/Unit/trunctfdf2_test",
"compiler-rt/test/builtins/Unit/trunctfsf2_test",
"compiler-rt/test/builtins/Unit/ucmpdi2_test",
"compiler-rt/test/builtins/Unit/ucmpti2_test",
"compiler-rt/test/builtins/Unit/udivdi3_test",
"compiler-rt/test/builtins/Unit/udivmoddi4_test",
"compiler-rt/test/builtins/Unit/udivmodsi4_test",
"compiler-rt/test/builtins/Unit/udivmodti4_test",
"compiler-rt/test/builtins/Unit/udivsi3_test",
"compiler-rt/test/builtins/Unit/udivti3_test",
"compiler-rt/test/builtins/Unit/umoddi3_test",
"compiler-rt/test/builtins/Unit/umodsi3_test",
"compiler-rt/test/builtins/Unit/umodti3_test",
"compiler-rt/test/builtins/Unit/unordtf2_test",
]
def get_modules_tpl(ts, self, build, machine):
'''Function template for get_modules() for each compiler-rt test case'''
modules = super(CompilerRTBuiltinsAbstract, self).get_modules(build, machine)
for m in ts:
if machine.name.startswith("panda") and \
(m.endswith("floatdisf_test") or m.endswith("floatdidf_test")):
# Skip failing test on pandaboard
continue
modules.add_module(m)
modules.add_module("usleeptest", [ "5" ])
return modules
def chunker(seq, size):
'''Helper function: this takes a sequence `seq` and splits it up into
`size`-sized chunks, except for the last chunk which is just the <= size
long remainder of the sequence'''
return (seq[pos:pos+size] for pos in xrange(0, len(seq), size))
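# Added example (not part of the original harness): chunker([1, 2, 3, 4, 5], 2)
# yields [1, 2], [3, 4] and [5], so fp_tests is split below into
# CHUNK_SIZE-sized groups, with the last group holding the remainder.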
# generate test-cases with <=CHUNK_SIZE compiler-rt tests each
CHUNK_SIZE=35
# array just to keep the class objects somewhere
compiler_rt_tests_classes = []
for i, ts in enumerate(chunker(fp_tests, CHUNK_SIZE)):
# append new class to our array
compiler_rt_tests_classes.append(
# this is essentially the decorator @tests.add_test
tests.add_test(
# type is the (built-in) base-class for python classes, here we
# construct classes by calling its constructor
# signature of type constructor:
# type(classname, baseclass tuple, dict with methods/attributes)
type('CompilerRTBuiltins%d' % (i+1),
(CompilerRTBuiltinsAbstract,),
{ 'name': 'compiler-rt-fp%d' % (i+1),
# partially bind the get_modules() template to select the
# right set of tests. Note the ts=ts in the lambda
# arguments, this prevents python's default late-binding
# for closure arguments.
'get_modules':
lambda s, b, m, ts=ts: get_modules_tpl(ts, s, b, m)})))
|
[
"results.PassFailMultiResult"
] |
[((949, 987), 'results.PassFailMultiResult', 'PassFailMultiResult', (['self.name', 'errors'], {}), '(self.name, errors)\n', (968, 987), False, 'from results import PassFailMultiResult\n')]
|
import requests
from requests.exceptions import HTTPError
import time
class APIBase:
"""
This class is to be used as a base to build an API library.
Authorization token generation and endpoint functions must be written
"""
def __init__(self, root, proxies=None, requests_session=True, max_retries=10, requests_timeout=None):
"""
Initialize the class
        :param root: Root URL for the API
:param proxies: A dictionary of proxies, if needed
:param requests_session: Use request Sessions class. Speeds up API calls significantly when set to True
:param max_retries: Maximum amount of times to retry an API call before stopping
:param requests_timeout: Number of seconds requests should wait before timing out
"""
self.proxies = proxies
self.token_str = "" # Encrypted API token. This will need to be set manually or by a method of a subclass
self.root = root
self.max_retries = max_retries
self.requests_timeout = requests_timeout
if requests_session:
self._session = requests.Session()
else:
self._session = requests.api # individual calls, slower
def _auth_headers(self):
"""
Get header for API request
:return: header in dictionary format
"""
if self.token_str:
return {'Authorization': 'Bearer {}'.format(self.token_str)}
else:
return {}
def _call(self, method, url, params):
"""
Make a call to the API
:param method: 'GET', 'POST', 'DELETE', or 'PUT'
:param url: URL of API endpoint
        :param params: API parameters
:return: JSON data from the API
"""
if not url.startswith('http'):
url = self.root + url
headers = self._auth_headers()
headers['Content-Type'] = 'application/json'
r = self._session.request(method, url,
headers=headers,
proxies=self.proxies,
params=params,
timeout=self.requests_timeout)
r.raise_for_status() # Check for error
return r.json()
def _get(self, url, **kwargs):
"""
GET request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
retries = self.max_retries
delay = 1
while retries > 0:
try:
return self._call('GET', url, kwargs)
except HTTPError as e: # Retry for some known issues
retries -= 1
status = e.response.status_code
if status == 429 or (500 <= status < 600):
if retries < 0:
raise
else:
print('retrying ...' + str(delay) + ' secs')
time.sleep(delay + 1)
delay += 1
else:
raise
except Exception as e:
print('exception', str(e))
retries -= 1
if retries >= 0:
print('retrying ...' + str(delay) + 'secs')
time.sleep(delay + 1)
delay += 1
else:
raise
def _post(self, url, **kwargs):
"""
POST request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('POST', url, kwargs)
def _delete(self, url, **kwargs):
"""
DELETE request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('DELETE', url, kwargs)
def _put(self, url, **kwargs):
"""
PUT request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('PUT', url, kwargs)
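# Added illustrative subclass (not part of the original module): a minimal
# sketch of how APIBase is meant to be extended; the base URL, endpoint and
# token handling below are hypothetical.
class ExampleAPI(APIBase):
    def __init__(self, token, **kwargs):
        super(ExampleAPI, self).__init__(root='https://api.example.com/v1/', **kwargs)
        self.token_str = token  # normally produced by a token-generation method
    def list_items(self, page=1):
        # GET https://api.example.com/v1/items?page=<page>
        return self._get('items', page=page)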
|
[
"requests.Session",
"time.sleep"
] |
[((1113, 1131), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1129, 1131), False, 'import requests\n'), ((3310, 3331), 'time.sleep', 'time.sleep', (['(delay + 1)'], {}), '(delay + 1)\n', (3320, 3331), False, 'import time\n'), ((2981, 3002), 'time.sleep', 'time.sleep', (['(delay + 1)'], {}), '(delay + 1)\n', (2991, 3002), False, 'import time\n')]
|
# Copyright (C) 2011-2020 Airbus, <EMAIL>
containers = { 'ELF': 'X86_64', 'MACHO': 'X86_64' }
try:
from plasmasm.python.compatibility import set
except ImportError:
pass
from plasmasm.arch.I386 import opcodes as opcodes_x86
x64_att_opcodes = set([
'jmpq', 'callq', 'retq', 'popq', 'pushq',
'movq', 'cmpq', 'testq', 'leaq', 'btq', 'bswapq',
'notq', 'orq', 'xorq', 'andq', 'bsfq', 'bslq', 'bsrq',
'rolq', 'rorq', 'sarq', 'salq', 'shrq', 'shlq', 'sbbq',
'negq', 'decq', 'incq', 'adcq', 'addq', 'subq',
'mulq', 'divq', 'imulq', 'idivq', 'shldq', 'shrdq',
'cltq', 'cqto', 'movabsq', 'movsbq', 'movslq', 'movswq',
'insq', 'movsq', 'outsq', 'lodsq', 'stosq', 'cmpsq', 'scasq',
'pextrq', 'pinsrq',
'cvtsi2sdq', 'cvtsi2ssq', 'cvttsd2siq', 'cvttss2siq',
])
suffix = [ 'a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le', 'nb', 'nc', 'ne', 'np', 'ns', 'nz', 'p', 's', ]
x64_att_opcodes.update(set([ 'cmov'+s+'q' for s in suffix ]))
del suffix
x64_att_opcodes.update(opcodes_x86['I386-att'])
opcodes = {
'X64-att': x64_att_opcodes,
}
|
[
"plasmasm.python.compatibility.set"
] |
[((250, 779), 'plasmasm.python.compatibility.set', 'set', (["['jmpq', 'callq', 'retq', 'popq', 'pushq', 'movq', 'cmpq', 'testq', 'leaq',\n 'btq', 'bswapq', 'notq', 'orq', 'xorq', 'andq', 'bsfq', 'bslq', 'bsrq',\n 'rolq', 'rorq', 'sarq', 'salq', 'shrq', 'shlq', 'sbbq', 'negq', 'decq',\n 'incq', 'adcq', 'addq', 'subq', 'mulq', 'divq', 'imulq', 'idivq',\n 'shldq', 'shrdq', 'cltq', 'cqto', 'movabsq', 'movsbq', 'movslq',\n 'movswq', 'insq', 'movsq', 'outsq', 'lodsq', 'stosq', 'cmpsq', 'scasq',\n 'pextrq', 'pinsrq', 'cvtsi2sdq', 'cvtsi2ssq', 'cvttsd2siq', 'cvttss2siq']"], {}), "(['jmpq', 'callq', 'retq', 'popq', 'pushq', 'movq', 'cmpq', 'testq',\n 'leaq', 'btq', 'bswapq', 'notq', 'orq', 'xorq', 'andq', 'bsfq', 'bslq',\n 'bsrq', 'rolq', 'rorq', 'sarq', 'salq', 'shrq', 'shlq', 'sbbq', 'negq',\n 'decq', 'incq', 'adcq', 'addq', 'subq', 'mulq', 'divq', 'imulq',\n 'idivq', 'shldq', 'shrdq', 'cltq', 'cqto', 'movabsq', 'movsbq',\n 'movslq', 'movswq', 'insq', 'movsq', 'outsq', 'lodsq', 'stosq', 'cmpsq',\n 'scasq', 'pextrq', 'pinsrq', 'cvtsi2sdq', 'cvtsi2ssq', 'cvttsd2siq',\n 'cvttss2siq'])\n", (253, 779), False, 'from plasmasm.python.compatibility import set\n'), ((979, 1020), 'plasmasm.python.compatibility.set', 'set', (["[('cmov' + s + 'q') for s in suffix]"], {}), "([('cmov' + s + 'q') for s in suffix])\n", (982, 1020), False, 'from plasmasm.python.compatibility import set\n')]
|
# Generated by Django 3.1.5 on 2021-01-25 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20210124_0610'),
]
operations = [
migrations.RenameModel(
old_name='Parent',
new_name='Account',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((223, 284), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Parent"""', 'new_name': '"""Account"""'}), "(old_name='Parent', new_name='Account')\n", (245, 284), False, 'from django.db import migrations\n')]
|
#!/usr/bin/python3
import threading
import time
import base64
from lynkco_app_request import lynkco_app_request
from com.uestcit.api.gateway.sdk.auth.aes import aes as AES
from sms_request import sms_request
import json
import sys
import os
import re
class lynco_regist_wrok(threading.Thread):
    """Worker thread that runs the registration task"""
def __init__(self, config):
        # Initialize the thread
threading.Thread.__init__(self)
        # Cache the configuration
self.config = config
self.project_id = self.config['sms_platform']['project_id']
self.max_count = int(self.config['sms_platform']['count'])
self.sms_request = sms_request()
        # Cache APPKEY (stored base64-encoded, so decode it once)
self.app_key = base64.b64decode(self.config['api_geteway']['app_key']).decode('utf-8')
        # Cache APPSECRET (stored base64-encoded, so decode it once)
self.app_secret = base64.b64decode(self.config['api_geteway']['app_secret']).decode('utf-8')
        # Cache AESKEY (stored twice base64-encoded, so decode it twice)
self.aes_key = base64.b64decode(base64.b64decode(self.config['aes_key']).decode('utf-8')).decode('utf-8')
self.AES = AES(self.aes_key)
self.lynkco_app_request = lynkco_app_request(self.app_key, self.app_secret)
    def run(self):
        """Entry point of the thread"""
print ("开始注册任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
self.token = self.get_token()
if('' == self.token):
return 0
phone_list = []
while len(phone_list) < self.max_count:
phone = self.regist()
if('' == phone):
continue
phone_list.append({ 'username': phone, 'password': '<PASSWORD>' })
with open(sys.path[0] + '/phone_list_' + time.strftime('%Y%m%d%H%M%S') + '.json', 'w') as json_file:
json_file.write(json.dumps(phone_list,ensure_ascii = False))
print ("注册执行完成任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
    def get_token(self):
        """Log in and obtain a token"""
sms_username = self.config['sms_platform']['username']
sms_password = self.config['sms_platform']['password']
context = self.sms_request.login(sms_username, sms_password)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户登录失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
token = array[1]
print("短信账户登录成功,token:" + token + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return token
    def regist(self):
        """App-side registration flow"""
        # Get a phone number
context = self.sms_request.get_phone(self.token, self.project_id)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户获取手机号失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
phone = array[1]
        # Send the registration SMS
response = self.lynkco_app_request.get_vcode_by_regist(phone)
if response['code'] != 'success':
print("发送注册短信失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
        # Try up to 10 times to fetch the SMS content, waiting 3 seconds after each failure
        vcode = ''
        fail_count = 0
while fail_count < 10:
context = self.sms_request.get_phone_msg(self.token, self.project_id, phone)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户获取验证码内容失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
fail_count += 1
time.sleep(3)
else:
context = array[1]
                # Extract the verification code with a regex
pattern = re.compile(r'\d{6}')
result = pattern.findall(context)
if(len(result) != 1):
print("短信账户解析验证码内容失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
else:
vcode = result[0]
print("短信账户获取验证码内容成功:" + vcode + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
break
if('' == vcode):
return ''
        # Send the registration request
password = self.AES.encrypt('<PASSWORD>')
response = self.lynkco_app_request.regist(phone, password, vcode)
if response['code'] != 'success':
print("发送注册接口失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
        # Try to log in once
response = self.lynkco_app_request.login(phone, password)
if response['code'] != 'success':
print("尝试接口失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return phone
return phone
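# Added usage sketch (not part of the original script), assuming a JSON config
# file with the 'sms_platform', 'api_geteway' and 'aes_key' keys read above;
# the file name is hypothetical.
#   with open('config.json') as f:
#       config = json.load(f)
#   worker = lynco_regist_wrok(config)
#   worker.start()
#   worker.join()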
|
[
"threading.Thread.__init__",
"re.compile",
"time.strftime",
"json.dumps",
"base64.b64decode",
"time.sleep",
"com.uestcit.api.gateway.sdk.auth.aes.aes",
"sms_request.sms_request",
"lynkco_app_request.lynkco_app_request"
] |
[((371, 402), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (396, 402), False, 'import threading\n'), ((611, 624), 'sms_request.sms_request', 'sms_request', ([], {}), '()\n', (622, 624), False, 'from sms_request import sms_request\n'), ((1113, 1130), 'com.uestcit.api.gateway.sdk.auth.aes.aes', 'AES', (['self.aes_key'], {}), '(self.aes_key)\n', (1116, 1130), True, 'from com.uestcit.api.gateway.sdk.auth.aes import aes as AES\n'), ((1165, 1214), 'lynkco_app_request.lynkco_app_request', 'lynkco_app_request', (['self.app_key', 'self.app_secret'], {}), '(self.app_key, self.app_secret)\n', (1183, 1214), False, 'from lynkco_app_request import lynkco_app_request\n'), ((700, 755), 'base64.b64decode', 'base64.b64decode', (["self.config['api_geteway']['app_key']"], {}), "(self.config['api_geteway']['app_key'])\n", (716, 755), False, 'import base64\n'), ((852, 910), 'base64.b64decode', 'base64.b64decode', (["self.config['api_geteway']['app_secret']"], {}), "(self.config['api_geteway']['app_secret'])\n", (868, 910), False, 'import base64\n'), ((1304, 1338), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1317, 1338), False, 'import time\n'), ((1803, 1845), 'json.dumps', 'json.dumps', (['phone_list'], {'ensure_ascii': '(False)'}), '(phone_list, ensure_ascii=False)\n', (1813, 1845), False, 'import json\n'), ((1894, 1928), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1907, 1928), False, 'import time\n'), ((2425, 2459), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (2438, 2459), False, 'import time\n'), ((3461, 3474), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3471, 3474), False, 'import time\n'), ((3583, 3603), 're.compile', 're.compile', (['"""\\\\d{6}"""'], {}), "('\\\\d{6}')\n", (3593, 3603), False, 'import re\n'), ((2292, 2326), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (2305, 2326), False, 'import time\n'), ((2742, 2776), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (2755, 2776), False, 'import time\n'), ((3005, 3039), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3018, 3039), False, 'import time\n'), ((4270, 4304), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (4283, 4304), False, 'import time\n'), ((4506, 4540), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (4519, 4540), False, 'import time\n'), ((1715, 1744), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (1728, 1744), False, 'import time\n'), ((3405, 3439), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3418, 3439), False, 'import time\n'), ((1020, 1060), 'base64.b64decode', 'base64.b64decode', (["self.config['aes_key']"], {}), "(self.config['aes_key'])\n", (1036, 1060), False, 'import base64\n'), ((3781, 3815), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3794, 3815), False, 'import time\n'), ((3936, 3970), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3949, 3970), False, 'import time\n')]
|
# Author: <NAME> <<EMAIL>>
# A core-attachment based method to detect protein complexes in PPI networks
# <NAME>, Kwoh, Ng (2009)
# http://www.biomedcentral.com/1471-2105/10/169
from collections import defaultdict
from itertools import combinations
import functools
# return average degree and density for a graph
def __graph_stats(graph):
avg_deg = sum(len(n) for n in graph.values()) / float(len(graph))
density = avg_deg / (len(graph) - 1)
return avg_deg, density
# return core nodes, given a graph and its average degree
__get_core_nodes = lambda g, avg: set(v for v, n in g.items() if len(n) >= avg)
# return NA score
__NA_score = lambda a, b: float(len(a & b) ** 2) / (len(a) * len(b))
def __core_removal(graph, density_threshold):
if len(graph) == 1: # need at least two nodes in the graph...
return [graph]
avg_deg, density = __graph_stats(graph)
if density >= density_threshold:
return [graph]
else:
# find and remove core nodes; create connected subcomponents
core_nodes = __get_core_nodes(graph, avg_deg)
result = []
subgraphs = []
for v, n in graph.items():
if v in core_nodes:
continue
n = n - core_nodes # note that we're reassigning n
for s in subgraphs:
if not n.isdisjoint(s):
s |= n
break
else:
subgraphs.append(n | {v})
# connected subcomponent joining
i = 0
while i < len(subgraphs) - 1:
j = i + 1
while j < len(subgraphs):
if not subgraphs[i].isdisjoint(subgraphs[j]):
subgraphs[i] |= subgraphs[j]
subgraphs.pop(j)
else:
j += 1
i += 1
# recursive core removal
for s in subgraphs:
tresults = __core_removal(
dict((v, graph[v] & s) for v in s), density_threshold
)
for tc in tresults:
nodes = set()
for v, n in tc.items():
nodes.add(v)
n |= graph[v] & core_nodes
for c in core_nodes:
tc[c] = graph[c] & (nodes | core_nodes)
result += tresults
return result
def co_ach(g, density_threshold=0.7, affinity_threshold=0.225, closeness_threshold=0.5):
# read protein-protein pairs
data = defaultdict(set)
for a, b in g.edges():
data[a].add(b)
data[b].add(a)
# step 1: find preliminary cores
SC = [] # currently-detected preliminary cores
count = 0
for vertex, neighbors in data.items():
# build neighborhood graph
vertices = {vertex} | neighbors
size1_neighbors = set()
graph = {}
for v in vertices:
n = data[v] & vertices
if len(n) > 1: # ignore size-1 vertices
graph[v] = n
else:
size1_neighbors.add(v)
if len(graph) < 2: # not enough connections in this graph
continue
graph[vertex] -= size1_neighbors
# get core graph
avg_deg, density = __graph_stats(graph)
core_nodes = __get_core_nodes(graph, avg_deg)
vertices = set(graph.keys())
for v in vertices - core_nodes:
del graph[v]
for n in graph.values():
n &= core_nodes
if len(graph) < 2: # not enough connections in this graph
continue
graph_nodes = set(graph)
# inner loop
for sg in __core_removal(graph, density_threshold):
while True:
_, density = __graph_stats(sg)
# if density threshold met, stop; else, remove min degree node
if density >= density_threshold:
break
w = min(sg.items(), key=lambda k: len(k[1]))[0]
del sg[w]
for n in sg.values():
n.discard(w)
sg_nodes = set(sg)
while graph_nodes - sg_nodes:
w = max(graph_nodes - sg_nodes, key=lambda v: len(graph[v] & sg_nodes))
new_sg = sg.copy()
for v, n in new_sg.items():
if w in graph[v]:
n.add(w)
new_sg[w] = graph[w] & sg_nodes
_, density = __graph_stats(new_sg)
if density < density_threshold:
break
sg = new_sg
sg_nodes.add(w)
# redundancy filtering
max_sim = -1
for i in range(len(SC)):
sim = __NA_score(set(SC[i]), sg_nodes)
if sim > max_sim:
max_sim = sim
index = i
if max_sim < affinity_threshold:
SC.append(sg)
else:
_, density_i = __graph_stats(SC[index])
if density * len(sg) > density_i * len(SC[index]):
SC[index] = sg
# step 2: adding peripheral proteins
clusters = set()
for core in SC:
nodes = frozenset(core)
neighbors = (
functools.reduce(lambda x, y: x | y, (data[v] for v in nodes)) - nodes
)
neighbors -= set(
v
for v in neighbors
if float(len(data[v] & nodes)) / len(nodes) <= closeness_threshold
)
clusters.add(nodes | neighbors)
return [list(c) for c in clusters]
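# Added usage sketch (not part of the original module), assuming `g` is a
# networkx-style undirected graph (co_ach only needs g.edges()):
#   import networkx as nx
#   g = nx.Graph([('a', 'b'), ('a', 'c'), ('b', 'c'), ('c', 'd')])
#   complexes = co_ach(g, density_threshold=0.7)
#   # -> list of predicted complexes, each a list of protein names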
|
[
"functools.reduce",
"collections.defaultdict"
] |
[((2492, 2508), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2503, 2508), False, 'from collections import defaultdict\n'), ((5261, 5323), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x | y)', '(data[v] for v in nodes)'], {}), '(lambda x, y: x | y, (data[v] for v in nodes))\n', (5277, 5323), False, 'import functools\n')]
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.constants import DEFAULT_QUEUE
from common.waterfall import failure_type
from gae_libs.pipeline_wrapper import pipeline_handlers
from waterfall import create_revert_cl_pipeline
from waterfall.create_revert_cl_pipeline import CreateRevertCLPipeline
from waterfall.revert_and_notify_culprit_pipeline import (
RevertAndNotifyCulpritPipeline)
from waterfall.send_notification_for_culprit_pipeline import (
SendNotificationForCulpritPipeline)
from waterfall.test import wf_testcase
class RevertAndNotifyCulpritPipelineTest(wf_testcase.WaterfallTestCase):
app_module = pipeline_handlers._APP
def testSendNotificationForTestCulprit(self):
master_name = 'm'
builder_name = 'b'
build_number = 124
repo_name = 'chromium'
revision = 'r1'
culprits = {
'r1': {
'repo_name': repo_name,
'revision': revision,
}
}
heuristic_cls = [[repo_name, revision]]
try_job_type = failure_type.TEST
self.MockPipeline(SendNotificationForCulpritPipeline,
None,
expected_args=[master_name, builder_name, build_number,
repo_name, revision, True])
pipeline = RevertAndNotifyCulpritPipeline(
master_name, builder_name, build_number, culprits,
heuristic_cls, try_job_type)
pipeline.start(queue_name=DEFAULT_QUEUE)
self.execute_queued_tasks()
def testSendNotificationToConfirmRevert(self):
master_name = 'm'
builder_name = 'b'
build_number = 124
repo_name = 'chromium'
revision = 'r1'
culprits = {
'r1': {
'repo_name': repo_name,
'revision': revision,
}
}
heuristic_cls = [[repo_name, revision]]
try_job_type = failure_type.COMPILE
self.MockPipeline(CreateRevertCLPipeline,
create_revert_cl_pipeline.CREATED_BY_SHERIFF,
expected_args=[master_name, builder_name, build_number,
repo_name, revision])
self.MockPipeline(SendNotificationForCulpritPipeline,
None,
expected_args=[
master_name, builder_name, build_number, repo_name,
revision, True,
create_revert_cl_pipeline.CREATED_BY_SHERIFF])
pipeline = RevertAndNotifyCulpritPipeline(
master_name, builder_name, build_number, culprits,
heuristic_cls, try_job_type)
pipeline.start(queue_name=DEFAULT_QUEUE)
self.execute_queued_tasks()
|
[
"waterfall.revert_and_notify_culprit_pipeline.RevertAndNotifyCulpritPipeline"
] |
[((1383, 1497), 'waterfall.revert_and_notify_culprit_pipeline.RevertAndNotifyCulpritPipeline', 'RevertAndNotifyCulpritPipeline', (['master_name', 'builder_name', 'build_number', 'culprits', 'heuristic_cls', 'try_job_type'], {}), '(master_name, builder_name, build_number,\n culprits, heuristic_cls, try_job_type)\n', (1413, 1497), False, 'from waterfall.revert_and_notify_culprit_pipeline import RevertAndNotifyCulpritPipeline\n'), ((2541, 2655), 'waterfall.revert_and_notify_culprit_pipeline.RevertAndNotifyCulpritPipeline', 'RevertAndNotifyCulpritPipeline', (['master_name', 'builder_name', 'build_number', 'culprits', 'heuristic_cls', 'try_job_type'], {}), '(master_name, builder_name, build_number,\n culprits, heuristic_cls, try_job_type)\n', (2571, 2655), False, 'from waterfall.revert_and_notify_culprit_pipeline import RevertAndNotifyCulpritPipeline\n')]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
class LSTM(nn.Module):
def __init__(self, embedding_matrix, embedding_dim, vocab_size, hidden_dim, dropout, num_layers, bidirectional, output_dim):
"""
Args:
embedding_matrix: Pre-trained word embeddings matrix
embedding_dim: Embedding dimension of the word embeddings
vocab_size: Dimension of the vocabulary
            hidden_dim: Dimension of the hidden states
dropout: Dropout probability
num_layers: Number of layers of the LSTM
            bidirectional: Whether the LSTM is bidirectional
output_dim: Number of output classes (Subtask A: 2 = (OFF, NOT))
"""
super(LSTM, self).__init__()
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.bidirectional = bidirectional
#Word embeddings
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.word_embeddings.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32), requires_grad=False)
#Dropout
self.dropout = dropout
#LSTM layer(s)
if(self.bidirectional):
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2 , num_layers, dropout=self.dropout, bidirectional=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers, dropout=self.dropout)
#Linear layer
self.output = nn.Linear(in_features=hidden_dim, out_features=output_dim)
def forward(self, X):
#Word embeddings
embedded = self.word_embeddings(X)
embedded = embedded.permute(1,0,2)
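        # (batch, seq_len, emb) -> (seq_len, batch, emb), the layout nn.LSTM expects
        # when batch_first is left at its default of False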
#Batch size
batch_size = X.size(0)
#Initial hidden state
if(self.bidirectional):
h0 = Variable(torch.zeros(2*self.num_layers, batch_size, self.hidden_dim // 2))
c0 = Variable(torch.zeros(2*self.num_layers, batch_size, self.hidden_dim // 2))
else:
h0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_dim))
c0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_dim))
#Forward state
output, (hidden_state, cell_state) = self.lstm(embedded, (h0, c0))
x = self.output(output[-1])
return x
|
[
"torch.nn.LSTM",
"torch.tensor",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Embedding"
] |
[((991, 1030), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (1003, 1030), True, 'import torch.nn as nn\n'), ((1532, 1590), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'hidden_dim', 'out_features': 'output_dim'}), '(in_features=hidden_dim, out_features=output_dim)\n', (1541, 1590), True, 'import torch.nn as nn\n'), ((1082, 1133), 'torch.tensor', 'torch.tensor', (['embedding_matrix'], {'dtype': 'torch.float32'}), '(embedding_matrix, dtype=torch.float32)\n', (1094, 1133), False, 'import torch\n'), ((1285, 1382), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', '(hidden_dim // 2)', 'num_layers'], {'dropout': 'self.dropout', 'bidirectional': '(True)'}), '(embedding_dim, hidden_dim // 2, num_layers, dropout=self.dropout,\n bidirectional=True)\n', (1292, 1382), True, 'import torch.nn as nn\n'), ((1418, 1486), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim', 'num_layers'], {'dropout': 'self.dropout'}), '(embedding_dim, hidden_dim, num_layers, dropout=self.dropout)\n', (1425, 1486), True, 'import torch.nn as nn\n'), ((1872, 1938), 'torch.zeros', 'torch.zeros', (['(2 * self.num_layers)', 'batch_size', '(self.hidden_dim // 2)'], {}), '(2 * self.num_layers, batch_size, self.hidden_dim // 2)\n', (1883, 1938), False, 'import torch\n'), ((1964, 2030), 'torch.zeros', 'torch.zeros', (['(2 * self.num_layers)', 'batch_size', '(self.hidden_dim // 2)'], {}), '(2 * self.num_layers, batch_size, self.hidden_dim // 2)\n', (1975, 2030), False, 'import torch\n'), ((2070, 2127), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_dim'], {}), '(self.num_layers, batch_size, self.hidden_dim)\n', (2081, 2127), False, 'import torch\n'), ((2155, 2212), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_dim'], {}), '(self.num_layers, batch_size, self.hidden_dim)\n', (2166, 2212), False, 'import torch\n')]
|
from django.core.urlresolvers import resolve
from django.shortcuts import render,redirect,HttpResponse
from kingadmin.permission_list import perm_dic
from django.conf import settings
def perm_check(*args,**kwargs):
request = args[0]
resolve_url_obj = resolve(request.path)
    current_url_name = resolve_url_obj.url_name # url_name of the current url
print('---perm:',request.user,request.user.is_authenticated(),current_url_name)
#match_flag = False
match_results = [None,]
match_key = None
if request.user.is_authenticated() is False:
return redirect(settings.LOGIN_URL)
for permission_key,permission_val in perm_dic.items():
per_url_name = permission_val[0]
per_method = permission_val[1]
perm_args = permission_val[2]
perm_kwargs = permission_val[3]
perm_hook_func = permission_val[4] if len(permission_val)>4 else None
if per_url_name == current_url_name: #matches current request url
if per_method == request.method: #matches request method
# if not perm_args: #if no args defined in perm dic, then set this request to passed perm
                # Match the args one by one and check that each of them can be found in the request.
args_matched = False #for args only
for item in perm_args:
request_method_func = getattr(request,per_method) #request.GET/POST
                    if request_method_func.get(item,None):# the request dict contains this arg
args_matched = True
else:
print("arg not match......")
args_matched = False
                        break # one arg failed to match, so mark the check as failed and leave this loop
                else:# only reached here when the args list is empty
args_matched = True
                # Match args that must carry a specific value
kwargs_matched = False
for k,v in perm_kwargs.items():
request_method_func = getattr(request, per_method)
                    arg_val = request_method_func.get(k, None) # the request dict contains this arg
                    print("perm kwargs check:",arg_val,type(arg_val),v,type(v))
                    if arg_val == str(v): # matched the specific arg and its value, e.g. the request object must carry an arg like user_id=3
kwargs_matched = True
else:
kwargs_matched = False
                        break # one arg failed to match, so mark the check as failed and leave this loop
                else:
                    kwargs_matched = True
                # Start checking the custom permission hook function
perm_hook_matched = False
if perm_hook_func:
perm_hook_matched = perm_hook_func(request)
match_results = [args_matched,kwargs_matched,perm_hook_matched]
print("--->match_results ", match_results)
                if all(match_results): # everything matched
match_key = permission_key
break
if all(match_results):
app_name, *per_name = match_key.split('_')
print("--->matched ",match_results,match_key)
print(app_name, *per_name)
perm_obj = '%s.%s' % (app_name,match_key)
print("perm str:",perm_obj)
if request.user.has_perm(perm_obj):
            print('The current user has this permission')
return True
else:
            print('The current user does not have this permission')
return False
else:
print("未匹配到权限项,当前用户无权限")
def check_permission(func):
def inner(*args,**kwargs):
if not perm_check(*args,**kwargs):
request = args[0]
return render(request,'kingadmin/page_403.html')
return func(*args,**kwargs)
return inner
|
[
"django.shortcuts.render",
"django.core.urlresolvers.resolve",
"django.shortcuts.redirect",
"kingadmin.permission_list.perm_dic.items"
] |
[((262, 283), 'django.core.urlresolvers.resolve', 'resolve', (['request.path'], {}), '(request.path)\n', (269, 283), False, 'from django.core.urlresolvers import resolve\n'), ((644, 660), 'kingadmin.permission_list.perm_dic.items', 'perm_dic.items', ([], {}), '()\n', (658, 660), False, 'from kingadmin.permission_list import perm_dic\n'), ((572, 600), 'django.shortcuts.redirect', 'redirect', (['settings.LOGIN_URL'], {}), '(settings.LOGIN_URL)\n', (580, 600), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((3483, 3525), 'django.shortcuts.render', 'render', (['request', '"""kingadmin/page_403.html"""'], {}), "(request, 'kingadmin/page_403.html')\n", (3489, 3525), False, 'from django.shortcuts import render, redirect, HttpResponse\n')]
|
#!/usr/bin/env python3
from time import sleep
import logging
import os
import subprocess
print("Nebra ECC Tool")
preTestFail = 0
afterTestFail = 0
ECC_SUCCESSFUL_TOUCH_FILEPATH = "/var/data/gwmfr_ecc_provisioned"
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
def record_successful_provision():
logging.debug("ECC provisioning complete")
# Via: https://stackoverflow.com/questions/12654772/create-empty-file-using-python/12654798
# because path lib not included in python3-minimal
# https://stackoverflow.com/questions/1158076/implement-touch-using-python
open(ECC_SUCCESSFUL_TOUCH_FILEPATH, 'a').close()
logging.debug("ECC provisioning recorded. Touched to %s" % ECC_SUCCESSFUL_TOUCH_FILEPATH)
while preTestFail < 10:
preTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True)
preTestResult = str(preTest.stdout.decode('ascii')).rstrip()
if "not responding to pings" not in preTestResult:
break
else:
print("Can't load provisioning tool, retrying")
preTestFail += 1
sleep(2)
if "ecc_response_exec_error" in preTestResult:
print("Provisioning")
while afterTestFail < 5:
subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "provision"])
print("Testing")
afterTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True).stdout
afterTestResult = str(afterTest.decode('ascii')).rstrip()
print(afterTestResult)
if "ecc_response_exec_error" in afterTestResult:
print("\033[91mProgramming FAILED\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(afterTestResult) == 51 or len(afterTestResult) == 52):
print("\033[92mProgramming Success!\033[0m")
record_successful_provision()
break
else:
print("\033[91mAn Unknown Error Occured\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(preTestResult) == 50 or len(preTestResult) == 51 or len(preTestResult) == 52):
print("\033[93mKey Already Programmed\033[0m")
print(preTestResult)
record_successful_provision()
else:
print("An Unknown Error Occured")
print(preTestResult)
# This next bit of mank is so we can run the gwmfr container for longer
# by providing the OVERRIDE_GWMFR_EXIT environment variable for trouble
# shooting purposes.
if os.getenv('OVERRIDE_GWMFR_EXIT', None):
while(True):
print("GWMFR Utility Exit Overriden")
sleep(300)
|
[
"logging.debug",
"os.getenv",
"subprocess.run",
"os.environ.get",
"time.sleep"
] |
[((2556, 2594), 'os.getenv', 'os.getenv', (['"""OVERRIDE_GWMFR_EXIT"""', 'None'], {}), "('OVERRIDE_GWMFR_EXIT', None)\n", (2565, 2594), False, 'import os\n'), ((321, 363), 'logging.debug', 'logging.debug', (['"""ECC provisioning complete"""'], {}), "('ECC provisioning complete')\n", (334, 363), False, 'import logging\n'), ((651, 744), 'logging.debug', 'logging.debug', (["('ECC provisioning recorded. Touched to %s' % ECC_SUCCESSFUL_TOUCH_FILEPATH)"], {}), "('ECC provisioning recorded. Touched to %s' %\n ECC_SUCCESSFUL_TOUCH_FILEPATH)\n", (664, 744), False, 'import logging\n'), ((781, 879), 'subprocess.run', 'subprocess.run', (["['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'onboarding']"], {'capture_output': '(True)'}), "(['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'onboarding'],\n capture_output=True)\n", (795, 879), False, 'import subprocess\n'), ((243, 278), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""DEBUG"""'], {}), "('LOGLEVEL', 'DEBUG')\n", (257, 278), False, 'import os\n'), ((1109, 1117), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1114, 1117), False, 'from time import sleep\n'), ((1230, 1302), 'subprocess.run', 'subprocess.run', (["['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'provision']"], {}), "(['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'provision'])\n", (1244, 1302), False, 'import subprocess\n'), ((2667, 2677), 'time.sleep', 'sleep', (['(300)'], {}), '(300)\n', (2672, 2677), False, 'from time import sleep\n'), ((1350, 1448), 'subprocess.run', 'subprocess.run', (["['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'onboarding']"], {'capture_output': '(True)'}), "(['/opt/gateway_mfr/bin/gateway_mfr', 'ecc', 'onboarding'],\n capture_output=True)\n", (1364, 1448), False, 'import subprocess\n'), ((1748, 1756), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1753, 1756), False, 'from time import sleep\n'), ((2108, 2116), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2113, 2116), False, 'from time import sleep\n')]
|
#!/usr/bin/env python3
""" Simple example/demo of the unlinkable DP-3T design
This demo simulates some interactions between two phones,
represented by the contact tracing modules, and then runs
contact tracing.
"""
__copyright__ = """
Copyright 2020 EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
from datetime import timedelta
from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch
def report_broadcasted_ephids(name, app):
"""
Convenience function to report some broadcasted EphIDs
"""
reporting_time = app.start_of_today + timedelta(hours=10)
ephid = app.get_ephid_for_time(reporting_time)
print("At {}: {} broadcasts {}".format(reporting_time.time(), name, ephid.hex()))
def report_day(time):
"""
Convenience function to report start of the day
"""
print("---- {} ----".format(time))
def process_single_day(alice, bob, interaction_time=None):
"""
Convenience function, process and report on a single day
"""
report_day(alice.today)
report_broadcasted_ephids("Alice", alice)
report_broadcasted_ephids("Bob", bob)
if interaction_time:
print("Alice and Bob interact:")
ephid_bob = bob.get_ephid_for_time(interaction_time)
alice.add_observation(ephid_bob, interaction_time)
print(" Alice observes Bob's EphID {}".format(ephid_bob.hex()))
ephid_alice = alice.get_ephid_for_time(interaction_time)
bob.add_observation(ephid_alice, interaction_time)
print(" Bob observes Alice's EphID {}".format(ephid_alice.hex()))
else:
print("Alice and Bob do not interact")
# Advance to the next day
alice.next_day()
bob.next_day()
print("")
def main():
alice = ContactTracer()
bob = ContactTracer()
### Interaction ###
process_single_day(alice, bob)
process_single_day(alice, bob)
interaction_time = alice.start_of_today + timedelta(hours=10)
bob_contagious_start = bob.start_of_today
process_single_day(alice, bob, interaction_time)
print("... skipping 3 days ...\n")
for _ in range(4):
alice.next_day()
bob.next_day()
### Diagnosis and reporting ###
report_day(alice.today)
print("Bob is diagnosed with SARS-CoV-2")
print(
"Doctor establishes that Bob started being contagious at {}".format(
bob_contagious_start
)
)
print("And that Bob was contagious for 3 days")
bob_contagious_end = bob_contagious_start + timedelta(days=3)
print("\n[Bob -> Server] Bob sends:")
tracing_info_bob = bob.get_tracing_information(
bob_contagious_start, bob_contagious_end
)
print(
" * his seeds for the time period {} to {}".format(
bob_contagious_start, bob_contagious_end
)
)
print(" * and the corresponding epochs\n")
### Contact tracing ###
print("[Server] Compiles download batch by:")
print(" * Computing hashed observations given the seeds")
print(" * Inserts these into a cuckoo filter\n")
batch = TracingDataBatch([tracing_info_bob])
print("[Server -> Alice] Alice receives batch")
print(" * Alice checks if she was in contact with an infected person")
if alice.matches_with_batch(batch) > 0:
print(" * CORRECT: Alice's phone concludes she is at risk")
else:
print(" * ERROR: Alice's phone does not conclude she is at risk")
if __name__ == "__main__":
main()
|
[
"datetime.timedelta",
"dp3t.protocols.unlinkable.ContactTracer",
"dp3t.protocols.unlinkable.TracingDataBatch"
] |
[((2289, 2304), 'dp3t.protocols.unlinkable.ContactTracer', 'ContactTracer', ([], {}), '()\n', (2302, 2304), False, 'from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch\n'), ((2315, 2330), 'dp3t.protocols.unlinkable.ContactTracer', 'ContactTracer', ([], {}), '()\n', (2328, 2330), False, 'from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch\n'), ((3617, 3653), 'dp3t.protocols.unlinkable.TracingDataBatch', 'TracingDataBatch', (['[tracing_info_bob]'], {}), '([tracing_info_bob])\n', (3633, 3653), False, 'from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch\n'), ((1119, 1138), 'datetime.timedelta', 'timedelta', ([], {'hours': '(10)'}), '(hours=10)\n', (1128, 1138), False, 'from datetime import timedelta\n'), ((2474, 2493), 'datetime.timedelta', 'timedelta', ([], {'hours': '(10)'}), '(hours=10)\n', (2483, 2493), False, 'from datetime import timedelta\n'), ((3053, 3070), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (3062, 3070), False, 'from datetime import timedelta\n')]
|
from django.urls import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
from rest_framework_simplejwt.state import User
from core.views import DeactivateSelfAPIView, BecomeCommercialAPIView
from tests.unittests.common import APIFactoryTestCase
class BecomeCommercialAPITestCase(APIFactoryTestCase):
def setUp(self) -> None:
super(BecomeCommercialAPITestCase, self).setUp()
self.view = BecomeCommercialAPIView.as_view()
self.user = User.objects.get(username='User')
self.user_2 = User.objects.get(username='User2')
self.user_3 = User.objects.get(username='User3')
self.commercial_user = User.objects.get(username='Commercial')
def test_BecomeCommercialValid(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': '<PASSWORD>'
})
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(User.objects.get(username='User').is_commercial)
def test_BecomeCommercialInvalid(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': '<PASSWORD>'
})
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_BecomeCommercialUnauthenticated(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': '<PASSWORD>'
})
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_BecomeCommercialNoData(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'))
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_BecomeCommercialAlreadyCommercial(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': '<PASSWORD>'
})
force_authenticate(request, self.commercial_user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
[
"rest_framework.test.force_authenticate",
"core.views.BecomeCommercialAPIView.as_view",
"rest_framework_simplejwt.state.User.objects.get",
"django.urls.reverse"
] |
[((454, 487), 'core.views.BecomeCommercialAPIView.as_view', 'BecomeCommercialAPIView.as_view', ([], {}), '()\n', (485, 487), False, 'from core.views import DeactivateSelfAPIView, BecomeCommercialAPIView\n'), ((508, 541), 'rest_framework_simplejwt.state.User.objects.get', 'User.objects.get', ([], {'username': '"""User"""'}), "(username='User')\n", (524, 541), False, 'from rest_framework_simplejwt.state import User\n'), ((564, 598), 'rest_framework_simplejwt.state.User.objects.get', 'User.objects.get', ([], {'username': '"""User2"""'}), "(username='User2')\n", (580, 598), False, 'from rest_framework_simplejwt.state import User\n'), ((621, 655), 'rest_framework_simplejwt.state.User.objects.get', 'User.objects.get', ([], {'username': '"""User3"""'}), "(username='User3')\n", (637, 655), False, 'from rest_framework_simplejwt.state import User\n'), ((687, 726), 'rest_framework_simplejwt.state.User.objects.get', 'User.objects.get', ([], {'username': '"""Commercial"""'}), "(username='Commercial')\n", (703, 726), False, 'from rest_framework_simplejwt.state import User\n'), ((913, 951), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.user'], {}), '(request, self.user)\n', (931, 951), False, 'from rest_framework.test import force_authenticate\n'), ((1318, 1356), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.user'], {}), '(request, self.user)\n', (1336, 1356), False, 'from rest_framework.test import force_authenticate\n'), ((1911, 1949), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.user'], {}), '(request, self.user)\n', (1929, 1949), False, 'from rest_framework.test import force_authenticate\n'), ((2262, 2311), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.commercial_user'], {}), '(request, self.commercial_user)\n', (2280, 2311), False, 'from rest_framework.test import force_authenticate\n'), ((813, 853), 'django.urls.reverse', 'reverse', (['"""api_v1:core:become_commercial"""'], {}), "('api_v1:core:become_commercial')\n", (820, 853), False, 'from django.urls import reverse\n'), ((1218, 1258), 'django.urls.reverse', 'reverse', (['"""api_v1:core:become_commercial"""'], {}), "('api_v1:core:become_commercial')\n", (1225, 1258), False, 'from django.urls import reverse\n'), ((1567, 1607), 'django.urls.reverse', 'reverse', (['"""api_v1:core:become_commercial"""'], {}), "('api_v1:core:become_commercial')\n", (1574, 1607), False, 'from django.urls import reverse\n'), ((1861, 1901), 'django.urls.reverse', 'reverse', (['"""api_v1:core:become_commercial"""'], {}), "('api_v1:core:become_commercial')\n", (1868, 1901), False, 'from django.urls import reverse\n'), ((2162, 2202), 'django.urls.reverse', 'reverse', (['"""api_v1:core:become_commercial"""'], {}), "('api_v1:core:become_commercial')\n", (2169, 2202), False, 'from django.urls import reverse\n'), ((1081, 1114), 'rest_framework_simplejwt.state.User.objects.get', 'User.objects.get', ([], {'username': '"""User"""'}), "(username='User')\n", (1097, 1114), False, 'from rest_framework_simplejwt.state import User\n')]
|
# Comment
import pandas as pd
import re
from google.cloud import storage
from pathlib import Path
def load_data(filename, chunksize=10000):
good_columns = [
'created_at',
'entities',
'favorite_count',
'full_text',
'id_str',
'in_reply_to_screen_name',
'in_reply_to_status_id_str',
'is_quote_status',
'lang',
'retweet_count',
'source',
'user',
'quoted_status_id_str',
'quoted_status_permalink'
]
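    # Read the newline-delimited JSON in chunks to keep memory bounded; the id
    # columns are kept as strings so long tweet ids are not mangled by a float
    # conversion.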
chunks = pd.read_json(
filename,
lines=True,
chunksize=chunksize,
dtype={
'id_str': str,
'in_reply_to_status_id_str': str,
'quoted_status_id_str': str
}
)
df = pd.concat(chunk for chunk in chunks)[good_columns]
return df
def entity_extraction(entity, component, urls=False, user_mentions=False):
try:
if urls is True:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join([url['url'] for url in entity[component]])
elif user_mentions is True:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join(
[mention['screen_name'] for mention
in entity[component]]
)
else:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join([comp['text'] for comp in entity[component]])
except Exception:
return None
def source_extract(text):
try:
regex = re.compile(r'(?<=>).*?(?=<)', re.I)
return regex.search(text).group()
except AttributeError:
return None
def quoted_status_extract(status):
try:
return status['url']
except Exception:
return None
def clean_panacea_data(dataframe):
user_components = [
'created_at',
'description',
'favourites_count',
'followers_count',
'friends_count',
'id_str',
'location',
'name',
'profile_image_url_https',
'screen_name',
'statuses_count',
'verified'
]
dataframe['hashtags'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'hashtags'))
dataframe['symbols'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'symbols'))
dataframe['urls'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'urls', urls=True))
dataframe['user_mentions'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'user_mentions',
user_mentions=True))
dataframe['tweet_source'] = dataframe['source'].apply(source_extract)
for comp in user_components:
dataframe[f'user_{comp}'] = dataframe['user']\
.apply(lambda user: user[comp])
dataframe['quoted_status_url'] = dataframe['quoted_status_permalink']\
.apply(quoted_status_extract)
dataframe.drop(labels=[
'user',
'entities',
'source',
'quoted_status_permalink'
], axis=1, inplace=True)
dataframe.fillna('none', inplace=True)
return dataframe
def cleaning_wrapper(date):
print('Loading data...')
df = load_data(f'{date}/{date}_clean-dataset.json')
print('Cleaning data...')
df = clean_panacea_data(dataframe=df)
print(f'Cleaned data, converting data for date {date} to pickle format...')
df.to_pickle(f'{date}/{date}_clean-dataset.pkl')
def download_blob(bucket_name, source_blob_name, destination_file_name):
"""Downloads a blob from the bucket."""
# bucket_name = "your-bucket-name"
# source_blob_name = "storage-object-name"
# destination_file_name = "local/path/to/file"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print(f"Blob {source_blob_name} downloaded to {destination_file_name}.")
def upload_blob(bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "your-bucket-name"
# source_file_name = "local/path/to/file"
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print(f"File {source_file_name} uploaded to {destination_blob_name}.")
def main():
date = input('Date whose data will be cleaned (format: YYYY-MM-DD):\n')
bucket_name = 'thepanacealab_covid19twitter'
download_blob(
bucket_name=bucket_name,
source_blob_name=f'''
dailies/{date}/panacealab_{date}_clean-dataset.json
''',
destination_file_name=f'{date}/{date}_clean-dataset.json'
)
cleaning_wrapper(date)
upload_blob(
bucket_name=bucket_name,
source_file_name=f'{date}/{date}_clean-dataset.pkl',
destination_blob_name=f'dailies/{date}/{date}_clean-dataset.pkl'
)
file_delete_path = Path.cwd() / date / f'{date}_clean-dataset.json'
file_delete_path.unlink()
print(f'{date}_clean-dataset.json removed from {date} folder.')
if __name__ == '__main__':
main()
|
[
"google.cloud.storage.Client",
"re.compile",
"pathlib.Path.cwd",
"pandas.concat",
"pandas.read_json"
] |
[((529, 674), 'pandas.read_json', 'pd.read_json', (['filename'], {'lines': '(True)', 'chunksize': 'chunksize', 'dtype': "{'id_str': str, 'in_reply_to_status_id_str': str, 'quoted_status_id_str': str}"}), "(filename, lines=True, chunksize=chunksize, dtype={'id_str':\n str, 'in_reply_to_status_id_str': str, 'quoted_status_id_str': str})\n", (541, 674), True, 'import pandas as pd\n'), ((3948, 3964), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (3962, 3964), False, 'from google.cloud import storage\n'), ((4456, 4472), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (4470, 4472), False, 'from google.cloud import storage\n'), ((764, 800), 'pandas.concat', 'pd.concat', (['(chunk for chunk in chunks)'], {}), '(chunk for chunk in chunks)\n', (773, 800), True, 'import pandas as pd\n'), ((1718, 1752), 're.compile', 're.compile', (['"""(?<=>).*?(?=<)"""', 're.I'], {}), "('(?<=>).*?(?=<)', re.I)\n", (1728, 1752), False, 'import re\n'), ((5296, 5306), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (5304, 5306), False, 'from pathlib import Path\n')]
|
import os
import glob
import torch
import numpy as np
# from PIL import Image, UnidentifiedImageError
from torch.utils.data import Dataset
from torchvision.datasets import MNIST
class ToyDataset(Dataset):
def __init__(self, N_K=50, K=4, X=None, Y=None):
super().__init__()
if X is not None:
self.data, self.targets = X, Y
else:
self.data, self.targets = self._init_data(N_K, K)
self.task_ids = torch.arange(self.targets.size(0))
def _init_data(self, N_K, K):
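        # Sample four 2-D Gaussian-ish clusters (N_K points each) labelled 0-3,
        # then recentre the whole cloud.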
X1 = torch.cat([
0.8 + 0.4 * torch.randn(N_K, 1),
1.5 + 0.4 * torch.randn(N_K, 1),
], dim=-1)
Y1 = 0 * torch.ones(X1.size(0)).long()
X2 = torch.cat([
0.5 + 0.6 * torch.randn(N_K, 1),
-0.2 - 0.1 * torch.randn(N_K, 1),
], dim=-1)
Y2 = 1 * torch.ones(X2.size(0)).long()
X3 = torch.cat([
2.5 - 0.1 * torch.randn(N_K, 1),
1.0 + 0.6 * torch.randn(N_K, 1),
], dim=-1)
Y3 = 2 * torch.ones(X3.size(0)).long()
X4 = torch.distributions.MultivariateNormal(
torch.Tensor([-0.5, 1.5]),
covariance_matrix=torch.Tensor([[0.2, 0.1], [0.1, 0.1]])).sample(torch.Size([N_K]))
Y4 = 3 * torch.ones(X4.size(0)).long()
X = torch.cat([X1, X2, X3, X4], dim=0)
X[:, 1] -= 1
X[:, 0] -= 0.5
Y = torch.cat([Y1, Y2, Y3, Y4])
return X, Y
def filter_by_class(self, class_list=None):
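        # Restrict __getitem__ to samples whose label is in class_list; with no
        # class_list every sample stays visible.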
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def __getitem__(self, index):
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
class SplitMNIST(MNIST):
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.task_ids = torch.arange(self.targets.size(0))
def filter_by_class(self, class_list=None):
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
self.task_ids = torch.arange(self.targets.size(0))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
class PermutedMNIST(MNIST):
@staticmethod
def create_tasks(n=1):
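        # One task = one random permutation of the 784 flattened MNIST pixels.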
return [torch.randperm(784) for _ in range(n)]
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.perm = None
def set_task(self, perm):
assert self.perm is None, 'Cannot set task again.'
self.data = self.data[:, perm]
self.perm = perm
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[index], self.targets[index]
|
[
"torch.ones_like",
"torch.randperm",
"torch.Tensor",
"torch.zeros_like",
"torch.Size",
"torch.randn",
"torch.cat"
] |
[((1207, 1241), 'torch.cat', 'torch.cat', (['[X1, X2, X3, X4]'], {'dim': '(0)'}), '([X1, X2, X3, X4], dim=0)\n', (1216, 1241), False, 'import torch\n'), ((1287, 1314), 'torch.cat', 'torch.cat', (['[Y1, Y2, Y3, Y4]'], {}), '([Y1, Y2, Y3, Y4])\n', (1296, 1314), False, 'import torch\n'), ((1136, 1153), 'torch.Size', 'torch.Size', (['[N_K]'], {}), '([N_K])\n', (1146, 1153), False, 'import torch\n'), ((2951, 2970), 'torch.randperm', 'torch.randperm', (['(784)'], {}), '(784)\n', (2965, 2970), False, 'import torch\n'), ((1032, 1057), 'torch.Tensor', 'torch.Tensor', (['[-0.5, 1.5]'], {}), '([-0.5, 1.5])\n', (1044, 1057), False, 'import torch\n'), ((1411, 1441), 'torch.zeros_like', 'torch.zeros_like', (['self.targets'], {}), '(self.targets)\n', (1427, 1441), False, 'import torch\n'), ((1533, 1562), 'torch.ones_like', 'torch.ones_like', (['self.targets'], {}), '(self.targets)\n', (1548, 1562), False, 'import torch\n'), ((2162, 2192), 'torch.zeros_like', 'torch.zeros_like', (['self.targets'], {}), '(self.targets)\n', (2178, 2192), False, 'import torch\n'), ((2284, 2313), 'torch.ones_like', 'torch.ones_like', (['self.targets'], {}), '(self.targets)\n', (2299, 2313), False, 'import torch\n'), ((535, 554), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (546, 554), False, 'import torch\n'), ((574, 593), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (585, 593), False, 'import torch\n'), ((693, 712), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (704, 712), False, 'import torch\n'), ((733, 752), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (744, 752), False, 'import torch\n'), ((852, 871), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (863, 871), False, 'import torch\n'), ((891, 910), 'torch.randn', 'torch.randn', (['N_K', '(1)'], {}), '(N_K, 1)\n', (902, 910), False, 'import torch\n'), ((1089, 1127), 'torch.Tensor', 'torch.Tensor', (['[[0.2, 0.1], [0.1, 0.1]]'], {}), '([[0.2, 0.1], [0.1, 0.1]])\n', (1101, 1127), False, 'import torch\n')]
|
from django.shortcuts import render, HttpResponse
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView, DeleteView, CreateView
from . models import OuverTimeRecord
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.views import View
import json
import csv
# Import for reportlab
import io
from django.http import FileResponse
from reportlab.pdfgen import canvas
# Import for Xhtm2
from django.template.loader import get_template
from xhtml2pdf import pisa
#import Xlwt
import xlwt
# Imports needed by link_callback below (static/media path resolution)
import os
from django.conf import settings
from django.contrib.staticfiles import finders
def index(request):
return HttpResponse('ok')
class OuverTimeRecordListView(ListView):
model = OuverTimeRecord
# paginate_by = 100 # if pagination is desired
def get_queryset(self):
logged_company = self.request.user.employee.company.id
queryset = OuverTimeRecord.objects.filter(employee=logged_company)
return queryset
class OuverTimeRecordUpdate(UpdateView):
model = OuverTimeRecord
fields = ['reason', 'hours']
    # Method disabled due to a rule change
#def form_valid(self, form):
# obj = form.save(commit=False)
# obj.employee = self.request.user.employee
# obj.save()
# return super(OuverTimeRecordUpdate, self).form_valid(form)
class OuverTimeRecordDelete(DeleteView):
model = OuverTimeRecord
success_url = reverse_lazy('ouvertime_record:ouver-time')
class OuverTimeRecordCreate(CreateView):
model = OuverTimeRecord
fields = ['reason', 'hours']
def form_valid(self, form):
obj = form.save(commit=False)
obj.employee = self.request.user.employee
obj.save()
return super(OuverTimeRecordCreate, self).form_valid(form)
class UtilizouHoraExtra(View):
def post(self, *args, **kwargs):
used = OuverTimeRecord.objects.get(id=kwargs['pk'])
used.used = True
used.save()
employee = self.request.user.employee
response = json.dumps(
{'mensagem': 'Utilizado', 'hours': float(employee.sum_overtime)})
return HttpResponse(response, content_type='application/json')
class CheckedFalse(View):
def post(self, *args, **kwargs):
used = OuverTimeRecord.objects.get(id=kwargs['pk'])
used.used = False
used.save()
employee = self.request.user.employee
response = json.dumps(
{'mensagem': 'Não Utilizado', 'hours': float(employee.sum_overtime)})
return HttpResponse(response, content_type='application/json')
# ReportLab
def some_view(request):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="mypdf.pdf"'
buffer = io.BytesIO()
p = canvas.Canvas(buffer)
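    # ReportLab's origin is the bottom-left corner, so y starts high and is
    # decremented as each row is drawn further down the page.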
p.drawString(200, 810, 'Relatorio de Horas ReportLab')
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
y = 790
for time in times:
p.drawString(10, y, time.reason)
p.drawString(100, y, time.employee.name)
p.drawString(200, y, str(time.hours))
p.drawString(300, y, str(time.used))
y -= 40
p.showPage()
p.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
# Xhtml2
def link_callback(uri, rel):
"""
Convert HTML URIs to absolute system paths so xhtml2pdf can access those
resources
"""
result = finders.find(uri)
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = list(os.path.realpath(path) for path in result)
path = result[0]
else:
sUrl = settings.STATIC_URL # Typically /static/
sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/
mUrl = settings.MEDIA_URL # Typically /media/
mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/
if uri.startswith(mUrl):
path = os.path.join(mRoot, uri.replace(mUrl, ""))
elif uri.startswith(sUrl):
path = os.path.join(sRoot, uri.replace(sUrl, ""))
else:
return uri
# make sure that file exists
if not os.path.isfile(path):
raise Exception(
'media URI must start with %s or %s' % (sUrl, mUrl)
)
return path
def render_pdf_view(request):
template_path = 'ouvertime_record/time_report.html'
cols = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
context = {'cols': cols}
# Create a Django response object, and specify content_type as pdf
response = HttpResponse(content_type='application/pdf')
# response['Content-Disposition'] = 'attachment; filename="report.pdf"'
response['Content-Disposition'] = 'attachment; filename="time-report.pdf"'
# find the template and render it.
template = get_template(template_path)
html = template.render(context)
# create a pdf
pisa_status = pisa.CreatePDF(
html, dest=response, link_callback=link_callback)
    # if an error occurred, show an error view
if pisa_status.err:
return HttpResponse('We had some errors <pre>' + html + '</pre>')
return response
class ExportCsv(View):
def get(self, request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="somefilename.csv"'
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
writer = csv.writer(response)
writer.writerow(['Reason', 'Employee', 'Hours', 'Used'])
for time in times:
writer.writerow(
[time.reason, time.employee.name, time.hours, time.used])
return response
# Excel
class ExportExcel(View):
def get(self, request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="export_excel.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('export_excel')
row_num = 0
columns = ['Reason', 'Employee', 'Hours', 'Used']
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num])
font_style = xlwt.XFStyle()
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
row_num = 1
for time in times:
ws.write(row_num, 0, time.reason)
ws.write(row_num, 1, time.employee.name)
ws.write(row_num, 2, time.hours)
ws.write(row_num, 3, time.used)
row_num += 1
wb.save(response)
return response
|
[
"xhtml2pdf.pisa.CreatePDF",
"django.shortcuts.HttpResponse",
"xlwt.XFStyle",
"csv.writer",
"io.BytesIO",
"django.urls.reverse_lazy",
"reportlab.pdfgen.canvas.Canvas",
"xlwt.Workbook",
"django.template.loader.get_template"
] |
[((603, 621), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""ok"""'], {}), "('ok')\n", (615, 621), False, 'from django.shortcuts import render, HttpResponse\n'), ((1385, 1428), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""ouvertime_record:ouver-time"""'], {}), "('ouvertime_record:ouver-time')\n", (1397, 1428), False, 'from django.urls import reverse_lazy\n'), ((2599, 2643), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/pdf"""'}), "(content_type='application/pdf')\n", (2611, 2643), False, 'from django.shortcuts import render, HttpResponse\n'), ((2731, 2743), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2741, 2743), False, 'import io\n'), ((2752, 2773), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['buffer'], {}), '(buffer)\n', (2765, 2773), False, 'from reportlab.pdfgen import canvas\n'), ((4665, 4709), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/pdf"""'}), "(content_type='application/pdf')\n", (4677, 4709), False, 'from django.shortcuts import render, HttpResponse\n'), ((4919, 4946), 'django.template.loader.get_template', 'get_template', (['template_path'], {}), '(template_path)\n', (4931, 4946), False, 'from django.template.loader import get_template\n'), ((5021, 5085), 'xhtml2pdf.pisa.CreatePDF', 'pisa.CreatePDF', (['html'], {'dest': 'response', 'link_callback': 'link_callback'}), '(html, dest=response, link_callback=link_callback)\n', (5035, 5085), False, 'from xhtml2pdf import pisa\n'), ((2087, 2142), 'django.shortcuts.HttpResponse', 'HttpResponse', (['response'], {'content_type': '"""application/json"""'}), "(response, content_type='application/json')\n", (2099, 2142), False, 'from django.shortcuts import render, HttpResponse\n'), ((2490, 2545), 'django.shortcuts.HttpResponse', 'HttpResponse', (['response'], {'content_type': '"""application/json"""'}), "(response, content_type='application/json')\n", (2502, 2545), False, 'from django.shortcuts import render, HttpResponse\n'), ((5174, 5232), 'django.shortcuts.HttpResponse', 'HttpResponse', (["('We had some errors <pre>' + html + '</pre>')"], {}), "('We had some errors <pre>' + html + '</pre>')\n", (5186, 5232), False, 'from django.shortcuts import render, HttpResponse\n'), ((5399, 5436), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'content_type': '"""text/csv"""'}), "(content_type='text/csv')\n", (5411, 5436), False, 'from django.shortcuts import render, HttpResponse\n'), ((5643, 5663), 'csv.writer', 'csv.writer', (['response'], {}), '(response)\n', (5653, 5663), False, 'import csv\n'), ((5968, 6017), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/ms-excel"""'}), "(content_type='application/ms-excel')\n", (5980, 6017), False, 'from django.shortcuts import render, HttpResponse\n'), ((6116, 6147), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (6129, 6147), False, 'import xlwt\n'), ((6403, 6417), 'xlwt.XFStyle', 'xlwt.XFStyle', ([], {}), '()\n', (6415, 6417), False, 'import xlwt\n')]
|
import unittest as ut
import time
class test_magick(ut.TestCase):
def test_us(self):
list_r = list(range(0, 100))
for i in list_r:
with self.subTest(case=i):
self.assertEqual(magick(i), i)
def magick(x=None, start=0, stop=100):
yes = ['да', 'д', 'yes', 'y', 'ye']
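    # Binary search over [start, stop]: when x is given, compare against it
    # directly; otherwise ask the user whether the secret number is below the
    # midpoint.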
while (stop >= start):
current_state = (start + stop) // 2
if x is None:
            ans = input(f'Is it true that the secret number is less than {current_state}?').lower()
if ans in yes:
stop = current_state - 1
else:
start = current_state + 1
elif current_state > x:
stop = current_state - 1
else:
start = current_state + 1
return stop
def main():
    x = float(input('Enter a number: '))
    print('your number:', magick())
print('\n\n')
def test():
start = time.time()
magick(123123123123, 0, 10e100)
print(time.time() - start, '\n')
start = time.time()
magick(123123123123, 0, 10e250)
print(time.time() - start, '\n')
start = time.time()
magick(123123123123, 0, 10e500)
print(time.time() - start, '\n')
ut.main()
if __name__ == '__main__':
main()
# test()
|
[
"unittest.main",
"time.time"
] |
[((907, 918), 'time.time', 'time.time', ([], {}), '()\n', (916, 918), False, 'import time\n'), ((1005, 1016), 'time.time', 'time.time', ([], {}), '()\n', (1014, 1016), False, 'import time\n'), ((1103, 1114), 'time.time', 'time.time', ([], {}), '()\n', (1112, 1114), False, 'import time\n'), ((1193, 1202), 'unittest.main', 'ut.main', ([], {}), '()\n', (1200, 1202), True, 'import unittest as ut\n'), ((965, 976), 'time.time', 'time.time', ([], {}), '()\n', (974, 976), False, 'import time\n'), ((1063, 1074), 'time.time', 'time.time', ([], {}), '()\n', (1072, 1074), False, 'import time\n'), ((1161, 1172), 'time.time', 'time.time', ([], {}), '()\n', (1170, 1172), False, 'import time\n')]
|
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Functions for converting env episode data to tfrecords of transitions."""
import collections
import gin
import numpy as np
from PIL import Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
_bytes_feature = (
lambda v: tf.train.Feature(bytes_list=tf.train.BytesList(value=v)))
_int64_feature = (
lambda v: tf.train.Feature(int64_list=tf.train.Int64List(value=v)))
_float_feature = (
lambda v: tf.train.Feature(float_list=tf.train.FloatList(value=v)))
_IMAGE_KEY_PREFIX = 'image'
@gin.configurable
def make_fixed_length(
input_list,
fixed_length,
always_include_endpoints=True,
randomized=True):
"""Create a fixed length list by sampling entries from input_list.
Args:
input_list: The original list we sample entries from.
fixed_length: An integer: the desired length of the output list.
always_include_endpoints: If True, always include the first and last entries
of input_list in the output.
randomized: If True, select entries from input_list by random sampling with
replacement. If False, select entries from input_list deterministically.
Returns:
A list of length fixed_length containing sampled entries of input_list.
"""
original_length = len(input_list)
if original_length <= 2:
return None
if not randomized:
indices = np.sort(np.mod(np.arange(fixed_length), original_length))
return [input_list[i] for i in indices]
if always_include_endpoints:
# Always include entries 0 and N-1.
endpoint_indices = np.array([0, original_length - 1])
# The remaining (fixed_length-2) frames are sampled with replacement
# from entries [1, N-1) of input_list.
other_indices = 1 + np.random.choice(
original_length - 2, fixed_length-2, replace=True)
indices = np.concatenate(
(endpoint_indices, other_indices),
axis=0)
else:
indices = np.random.choice(
original_length, fixed_length, replace=True)
indices = np.sort(indices)
return [input_list[i] for i in indices]
@gin.configurable
def episode_to_transitions_reacher(episode_data, is_demo=False):
"""Converts reacher env data to transition examples."""
transitions = []
for i, transition in enumerate(episode_data):
del i
feature_dict = {}
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_dict['pose_t'] = _float_feature(obs_t)
feature_dict['pose_tp1'] = _float_feature(obs_tp1)
feature_dict['action'] = _float_feature(action)
feature_dict['reward'] = _float_feature([reward])
feature_dict['done'] = _int64_feature([int(done)])
feature_dict['is_demo'] = _int64_feature([int(is_demo)])
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
transitions.append(example)
return transitions
@gin.configurable
def episode_to_transitions_metareacher(episode_data):
"""Converts metareacher env data to transition examples."""
context_features = {}
feature_lists = collections.defaultdict(list)
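  # Episode-level (context) features are stored once; per-step features are
  # accumulated into lists and emitted as a tf.train.SequenceExample below.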
context_features['is_demo'] = _int64_feature(
[int(episode_data[0][-1]['is_demo'])])
context_features['target_idx'] = _int64_feature(
[episode_data[0][-1]['target_idx']])
for i, transition in enumerate(episode_data):
del i
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_lists['pose_t'].append(_float_feature(obs_t))
feature_lists['pose_tp1'].append(_float_feature(obs_tp1))
feature_lists['action'].append(_float_feature(action))
feature_lists['reward'].append(_float_feature([reward]))
feature_lists['done'].append(_int64_feature([int(done)]))
tf_feature_lists = {}
for key in feature_lists:
tf_feature_lists[key] = tf.train.FeatureList(feature=feature_lists[key])
return [tf.train.SequenceExample(
context=tf.train.Features(feature=context_features),
feature_lists=tf.train.FeatureLists(feature_list=tf_feature_lists))]
|
[
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.train.FloatList",
"numpy.random.choice",
"numpy.sort",
"numpy.array",
"tensorflow.compat.v1.train.BytesList",
"collections.defaultdict",
"tensorflow.compat.v1.train.FeatureList",
"numpy.concatenate",
"tensorflow.compat.v1.train.Int64List",
"tensorflow.compat.v1.train.FeatureLists",
"numpy.arange"
] |
[((2614, 2630), 'numpy.sort', 'np.sort', (['indices'], {}), '(indices)\n', (2621, 2630), True, 'import numpy as np\n'), ((3634, 3663), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3657, 3663), False, 'import collections\n'), ((2168, 2202), 'numpy.array', 'np.array', (['[0, original_length - 1]'], {}), '([0, original_length - 1])\n', (2176, 2202), True, 'import numpy as np\n'), ((2434, 2491), 'numpy.concatenate', 'np.concatenate', (['(endpoint_indices, other_indices)'], {'axis': '(0)'}), '((endpoint_indices, other_indices), axis=0)\n', (2448, 2491), True, 'import numpy as np\n'), ((2531, 2592), 'numpy.random.choice', 'np.random.choice', (['original_length', 'fixed_length'], {'replace': '(True)'}), '(original_length, fixed_length, replace=True)\n', (2547, 2592), True, 'import numpy as np\n'), ((4371, 4419), 'tensorflow.compat.v1.train.FeatureList', 'tf.train.FeatureList', ([], {'feature': 'feature_lists[key]'}), '(feature=feature_lists[key])\n', (4391, 4419), True, 'import tensorflow.compat.v1 as tf\n'), ((910, 937), 'tensorflow.compat.v1.train.BytesList', 'tf.train.BytesList', ([], {'value': 'v'}), '(value=v)\n', (928, 937), True, 'import tensorflow.compat.v1 as tf\n'), ((1001, 1028), 'tensorflow.compat.v1.train.Int64List', 'tf.train.Int64List', ([], {'value': 'v'}), '(value=v)\n', (1019, 1028), True, 'import tensorflow.compat.v1 as tf\n'), ((1092, 1119), 'tensorflow.compat.v1.train.FloatList', 'tf.train.FloatList', ([], {'value': 'v'}), '(value=v)\n', (1110, 1119), True, 'import tensorflow.compat.v1 as tf\n'), ((2343, 2412), 'numpy.random.choice', 'np.random.choice', (['(original_length - 2)', '(fixed_length - 2)'], {'replace': '(True)'}), '(original_length - 2, fixed_length - 2, replace=True)\n', (2359, 2412), True, 'import numpy as np\n'), ((1987, 2010), 'numpy.arange', 'np.arange', (['fixed_length'], {}), '(fixed_length)\n', (1996, 2010), True, 'import numpy as np\n'), ((3362, 3401), 'tensorflow.compat.v1.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (3379, 3401), True, 'import tensorflow.compat.v1 as tf\n'), ((4471, 4514), 'tensorflow.compat.v1.train.Features', 'tf.train.Features', ([], {'feature': 'context_features'}), '(feature=context_features)\n', (4488, 4514), True, 'import tensorflow.compat.v1 as tf\n'), ((4536, 4588), 'tensorflow.compat.v1.train.FeatureLists', 'tf.train.FeatureLists', ([], {'feature_list': 'tf_feature_lists'}), '(feature_list=tf_feature_lists)\n', (4557, 4588), True, 'import tensorflow.compat.v1 as tf\n')]
|
from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException
import pandas as pd
import requests
from StringIO import StringIO
def detect_file_extension(filename):
"""Extract and return the extension of a file given a filename.
Args:
filename (str): name of the file
Returns:
str: extension of the file
Raises:
BarryFileException: if extension not present in filename
"""
if filename is None:
raise BarryFileException("Input file name cannot be None")
split_filename = filename.split(".")
if len(split_filename) > 1:
return str(split_filename[-1]).lower()
else:
raise BarryFileException("Could not determine input file type from file extension")
def xls_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLS file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def xlsx_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLSX file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def csv_to_df(filename, skip_rows, skip_header, columns):
"""Converts a CSV file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_csv(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def url_to_df(url, skip_rows, skip_header, columns):
"""Converts a CSV from HTTP URL to Pandas dataframe.
Args:
url (str): http url of the csv
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
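        # Fetch the CSV over HTTP first, then hand the text to pandas via StringIO.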
url_content = requests.get(url).content
return pd.read_csv(StringIO(url_content), skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def df_to_xls(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
filename (str): name of the file
Raises:
BarryExportException: if file cannot be converted to dataframe
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_xlsx(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
filename (str): name of the file
Raises:
BarryExportException: if file cannot be converted to dataframe
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_json(df, out_filename):
"""Writes a Pandas dataframe to a JSON file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if dataframe cannot be written to file
"""
try:
df.to_json(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_csv(df, out_filename):
"""Writes a Pandas dataframe to a CSV file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if dataframe cannot be written to file
"""
try:
df.to_csv(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def sort_df(df, sort_column, ascending):
"""Sort a DataFrame with the column name passed in ascending/descending order.
Args:
df (dataframe): dataframe that needs to be sorted
sort_column (str): column to be sorted on
ascending (bool): sort order, ascending if True, descending if False
Returns:
dataframe: a pandas dataframe
Raises:
BarryDFException: if there is any error while sorting the dataframe
"""
try:
return df.sort(columns=sort_column, ascending=ascending)
except Exception as e:
raise BarryDFException("Could not sort dataframe on columns %s" % (sort_column))
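# A minimal usage sketch of the helpers above (the module import path, file
# names, and column name below are illustrative assumptions, not part of this
# code):
#
#     df = xls_to_df("report.xls", skip_rows=0, skip_header=False, columns=None)
#     df = sort_df(df, sort_column="name", ascending=True)
#     df_to_csv(df, "report_sorted.csv")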
|
[
"StringIO.StringIO",
"exceptions.BarryFileException",
"exceptions.BarryDFException",
"pandas.read_csv",
"exceptions.BarryConversionException",
"requests.get",
"pandas.read_excel",
"exceptions.BarryExportException"
] |
[((506, 558), 'exceptions.BarryFileException', 'BarryFileException', (['"""Input file name cannot be None"""'], {}), "('Input file name cannot be None')\n", (524, 558), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((705, 782), 'exceptions.BarryFileException', 'BarryFileException', (['"""Could not determine input file type from file extension"""'], {}), "('Could not determine input file type from file extension')\n", (723, 782), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((1557, 1635), 'pandas.read_excel', 'pd.read_excel', (['filename'], {'skiprows': 'skip_rows', 'header': 'skip_header', 'names': 'columns'}), '(filename, skiprows=skip_rows, header=skip_header, names=columns)\n', (1570, 1635), True, 'import pandas as pd\n'), ((2533, 2611), 'pandas.read_excel', 'pd.read_excel', (['filename'], {'skiprows': 'skip_rows', 'header': 'skip_header', 'names': 'columns'}), '(filename, skiprows=skip_rows, header=skip_header, names=columns)\n', (2546, 2611), True, 'import pandas as pd\n'), ((3507, 3583), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'skiprows': 'skip_rows', 'header': 'skip_header', 'names': 'columns'}), '(filename, skiprows=skip_rows, header=skip_header, names=columns)\n', (3518, 3583), True, 'import pandas as pd\n'), ((1677, 1754), 'exceptions.BarryConversionException', 'BarryConversionException', (["('Could not convert file %s to dataframe' % filename)"], {}), "('Could not convert file %s to dataframe' % filename)\n", (1701, 1754), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((2653, 2730), 'exceptions.BarryConversionException', 'BarryConversionException', (["('Could not convert file %s to dataframe' % filename)"], {}), "('Could not convert file %s to dataframe' % filename)\n", (2677, 2730), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((3625, 3702), 'exceptions.BarryConversionException', 'BarryConversionException', (["('Could not convert file %s to dataframe' % filename)"], {}), "('Could not convert file %s to dataframe' % filename)\n", (3649, 3702), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((4489, 4506), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4501, 4506), False, 'import requests\n'), ((4543, 4564), 'StringIO.StringIO', 'StringIO', (['url_content'], {}), '(url_content)\n', (4551, 4564), False, 'from StringIO import StringIO\n'), ((4662, 4739), 'exceptions.BarryConversionException', 'BarryConversionException', (["('Could not convert file %s to dataframe' % filename)"], {}), "('Could not convert file %s to dataframe' % filename)\n", (4686, 4739), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((5109, 5184), 'exceptions.BarryExportException', 'BarryExportException', (["('Could not write dataframe to file %s' % out_filename)"], {}), "('Could not write dataframe to file %s' % out_filename)\n", (5129, 5184), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((5555, 5630), 'exceptions.BarryExportException', 'BarryExportException', (["('Could not write dataframe to file %s' % out_filename)"], {}), "('Could not write dataframe to file %s' % 
out_filename)\n", (5575, 5630), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((6001, 6076), 'exceptions.BarryExportException', 'BarryExportException', (["('Could not write dataframe to file %s' % out_filename)"], {}), "('Could not write dataframe to file %s' % out_filename)\n", (6021, 6076), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((6444, 6519), 'exceptions.BarryExportException', 'BarryExportException', (["('Could not write dataframe to file %s' % out_filename)"], {}), "('Could not write dataframe to file %s' % out_filename)\n", (6464, 6519), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n'), ((7107, 7179), 'exceptions.BarryDFException', 'BarryDFException', (["('Could not sort dataframe on columns %s' % sort_column)"], {}), "('Could not sort dataframe on columns %s' % sort_column)\n", (7123, 7179), False, 'from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException\n')]
|
from flask_script import Command
from app import db
class SeedCommand(Command):
""" Seed the DB."""
def run(self):
if (
input(
"Are you sure you want to drop all tables and recreate? (y/N)\n"
).lower() == "y"
):
print("Dropping tables...")
db.drop_all()
db.create_all()
db.session.commit()
print("DB successfully seeded.")
|
[
"app.db.create_all",
"app.db.drop_all",
"app.db.session.commit"
] |
[((332, 345), 'app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (343, 345), False, 'from app import db\n'), ((358, 373), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (371, 373), False, 'from app import db\n'), ((386, 405), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (403, 405), False, 'from app import db\n')]
|
# -*- coding: utf-8 -*-
"""
Core client, used for all API requests.
"""
import os
import platform
from collections import namedtuple
from plivo.base import ResponseObject
from plivo.exceptions import (AuthenticationError, InvalidRequestError,
PlivoRestError, PlivoServerError,
ResourceNotFoundError, ValidationError)
from plivo.resources import (Accounts, Addresses, Applications, Calls,
Conferences, Endpoints, Identities, Messages,
Numbers, Pricings, Recordings, Subaccounts)
from plivo.resources.live_calls import LiveCalls
from plivo.resources.queued_calls import QueuedCalls
from plivo.utils import is_valid_mainaccount, is_valid_subaccount
from plivo.version import __version__
from requests import Request, Session
AuthenticationCredentials = namedtuple('AuthenticationCredentials',
'auth_id auth_token')
PLIVO_API = 'https://api.plivo.com'
PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account'])
def get_user_agent():
return 'plivo-python/%s (Python: %s)' % (__version__,
platform.python_version())
def fetch_credentials(auth_id, auth_token):
"""Fetches the right credentials either from params or from environment"""
if not (auth_id and auth_token):
try:
auth_id = os.environ['PLIVO_AUTH_ID']
auth_token = os.environ['PLIVO_AUTH_TOKEN']
except KeyError:
raise AuthenticationError('The Plivo Python SDK '
'could not find your auth credentials.')
if not (is_valid_mainaccount(auth_id) or is_valid_subaccount(auth_id)):
raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
class Client(object):
def __init__(self, auth_id=None, auth_token=None, proxies=None, timeout=5):
"""
The Plivo API client.
Deals with all the API requests to be made.
"""
self.base_uri = PLIVO_API_BASE_URI
self.session = Session()
self.session.headers.update({
'User-Agent': get_user_agent(),
'Content-Type': 'application/json',
'Accept': 'application/json',
})
self.session.auth = fetch_credentials(auth_id, auth_token)
self.multipart_session = Session()
self.multipart_session.headers.update({
'User-Agent': get_user_agent(),
'Cache-Control': 'no-cache',
})
self.multipart_session.auth = fetch_credentials(auth_id, auth_token)
self.proxies = proxies
self.timeout = timeout
self.account = Accounts(self)
self.subaccounts = Subaccounts(self)
self.applications = Applications(self)
self.calls = Calls(self)
self.live_calls = LiveCalls(self)
self.queued_calls = QueuedCalls(self)
self.conferences = Conferences(self)
self.endpoints = Endpoints(self)
self.messages = Messages(self)
self.numbers = Numbers(self)
self.pricing = Pricings(self)
self.recordings = Recordings(self)
self.addresses = Addresses(self)
self.identities = Identities(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
self.multipart_session.close()
def process_response(self,
method,
response,
response_type=None,
objects_type=None):
"""Processes the API response based on the status codes and method used
to access the API
"""
try:
response_json = response.json(
object_hook=
lambda x: ResponseObject(x) if isinstance(x, dict) else x)
if response_type:
r = response_type(self, response_json.__dict__)
response_json = r
if 'objects' in response_json and objects_type:
response_json.objects = [
objects_type(self, obj.__dict__)
for obj in response_json.objects
]
except ValueError:
response_json = None
if response.status_code == 400:
if response_json and 'error' in response_json:
raise ValidationError(response_json.error)
raise ValidationError(
'A parameter is missing or is invalid while accessing resource'
'at: {url}'.format(url=response.url))
if response.status_code == 401:
if response_json and 'error' in response_json:
raise AuthenticationError(response_json.error)
raise AuthenticationError(
'Failed to authenticate while accessing resource at: '
'{url}'.format(url=response.url))
if response.status_code == 404:
if response_json and 'error' in response_json:
raise ResourceNotFoundError(response_json.error)
raise ResourceNotFoundError(
'Resource not found at: {url}'.format(url=response.url))
if response.status_code == 405:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'HTTP method "{method}" not allowed to access resource at: '
'{url}'.format(method=method, url=response.url))
if response.status_code == 500:
if response_json and 'error' in response_json:
raise PlivoServerError(response_json.error)
raise PlivoServerError(
'A server error occurred while accessing resource at: '
'{url}'.format(url=response.url))
if method == 'DELETE':
if response.status_code != 204:
raise PlivoRestError('Resource at {url} could not be '
'deleted'.format(url=response.url))
elif response.status_code not in [200, 201, 202]:
raise PlivoRestError(
'Received status code {status_code} for the HTTP method '
'"{method}"'.format(
status_code=response.status_code, method=method))
return response_json
def create_request(self, method, path=None, data=None):
path = path or []
req = Request(method, '/'.join([self.base_uri, self.session.auth[0]] +
list([str(p) for p in path])) + '/',
**({
'params': data
} if method == 'GET' else {
'json': data
}))
return self.session.prepare_request(req)
def create_multipart_request(self,
method,
path=None,
data=None,
files=None):
path = path or []
data_args = {}
if method == 'GET':
data_args['params'] = data
else:
data_args['data'] = data
if files and 'file' in files and files['file'] != '':
data_args['files'] = files
req = Request(method,
'/'.join([self.base_uri, self.multipart_session.auth[0]]
+ list([str(p) for p in path])) + '/', **(
data_args))
return self.multipart_session.prepare_request(req)
def send_request(self, request, **kwargs):
if 'session' in kwargs:
session = kwargs['session']
del kwargs['session']
else:
session = self.session
return session.send(
request, proxies=self.proxies, timeout=self.timeout, **kwargs)
def request(self,
method,
path=None,
data=None,
response_type=None,
objects_type=None,
files=None,
**kwargs):
if files is not None:
req = self.create_multipart_request(method, path, data, files)
session = self.multipart_session
else:
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
res = self.send_request(req, **kwargs)
return self.process_response(method, res, response_type, objects_type)
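# A minimal usage sketch (the credentials are placeholders; `messages.create`
# illustrates one of the resource interfaces wired up in __init__ and is an
# assumption about that resource's API, not an exhaustive reference):
#
#     with Client(auth_id="MAXXXXXXXXXXXXXXXXXX", auth_token="your_token") as client:
#         client.messages.create(src="+14151234567", dst="+14157654321",
#                                text="Hello from plivo-python")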
|
[
"requests.Session",
"plivo.resources.Addresses",
"plivo.resources.Applications",
"plivo.utils.is_valid_mainaccount",
"plivo.resources.Accounts",
"plivo.resources.live_calls.LiveCalls",
"plivo.base.ResponseObject",
"plivo.exceptions.PlivoServerError",
"collections.namedtuple",
"plivo.resources.Identities",
"plivo.resources.Calls",
"plivo.resources.Conferences",
"plivo.resources.Recordings",
"plivo.resources.queued_calls.QueuedCalls",
"plivo.resources.Subaccounts",
"platform.python_version",
"plivo.utils.is_valid_subaccount",
"plivo.resources.Pricings",
"plivo.resources.Endpoints",
"plivo.exceptions.ResourceNotFoundError",
"plivo.resources.Numbers",
"plivo.exceptions.AuthenticationError",
"plivo.exceptions.ValidationError",
"plivo.exceptions.InvalidRequestError",
"plivo.resources.Messages"
] |
[((871, 932), 'collections.namedtuple', 'namedtuple', (['"""AuthenticationCredentials"""', '"""auth_id auth_token"""'], {}), "('AuthenticationCredentials', 'auth_id auth_token')\n", (881, 932), False, 'from collections import namedtuple\n'), ((1759, 1820), 'plivo.exceptions.AuthenticationError', 'AuthenticationError', (["('Invalid auth_id supplied: %s' % auth_id)"], {}), "('Invalid auth_id supplied: %s' % auth_id)\n", (1778, 1820), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((2177, 2186), 'requests.Session', 'Session', ([], {}), '()\n', (2184, 2186), False, 'from requests import Request, Session\n'), ((2470, 2479), 'requests.Session', 'Session', ([], {}), '()\n', (2477, 2479), False, 'from requests import Request, Session\n'), ((2786, 2800), 'plivo.resources.Accounts', 'Accounts', (['self'], {}), '(self)\n', (2794, 2800), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((2828, 2845), 'plivo.resources.Subaccounts', 'Subaccounts', (['self'], {}), '(self)\n', (2839, 2845), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((2874, 2892), 'plivo.resources.Applications', 'Applications', (['self'], {}), '(self)\n', (2886, 2892), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((2914, 2925), 'plivo.resources.Calls', 'Calls', (['self'], {}), '(self)\n', (2919, 2925), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((2952, 2967), 'plivo.resources.live_calls.LiveCalls', 'LiveCalls', (['self'], {}), '(self)\n', (2961, 2967), False, 'from plivo.resources.live_calls import LiveCalls\n'), ((2996, 3013), 'plivo.resources.queued_calls.QueuedCalls', 'QueuedCalls', (['self'], {}), '(self)\n', (3007, 3013), False, 'from plivo.resources.queued_calls import QueuedCalls\n'), ((3041, 3058), 'plivo.resources.Conferences', 'Conferences', (['self'], {}), '(self)\n', (3052, 3058), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3084, 3099), 'plivo.resources.Endpoints', 'Endpoints', (['self'], {}), '(self)\n', (3093, 3099), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3124, 3138), 'plivo.resources.Messages', 'Messages', (['self'], {}), '(self)\n', (3132, 3138), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3162, 3175), 'plivo.resources.Numbers', 'Numbers', (['self'], {}), '(self)\n', (3169, 3175), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3199, 3213), 'plivo.resources.Pricings', 'Pricings', (['self'], {}), '(self)\n', (3207, 3213), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, 
Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3240, 3256), 'plivo.resources.Recordings', 'Recordings', (['self'], {}), '(self)\n', (3250, 3256), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3282, 3297), 'plivo.resources.Addresses', 'Addresses', (['self'], {}), '(self)\n', (3291, 3297), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((3324, 3340), 'plivo.resources.Identities', 'Identities', (['self'], {}), '(self)\n', (3334, 3340), False, 'from plivo.resources import Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Numbers, Pricings, Recordings, Subaccounts\n'), ((1193, 1218), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (1216, 1218), False, 'import platform\n'), ((1681, 1710), 'plivo.utils.is_valid_mainaccount', 'is_valid_mainaccount', (['auth_id'], {}), '(auth_id)\n', (1701, 1710), False, 'from plivo.utils import is_valid_mainaccount, is_valid_subaccount\n'), ((1714, 1742), 'plivo.utils.is_valid_subaccount', 'is_valid_subaccount', (['auth_id'], {}), '(auth_id)\n', (1733, 1742), False, 'from plivo.utils import is_valid_mainaccount, is_valid_subaccount\n'), ((1545, 1631), 'plivo.exceptions.AuthenticationError', 'AuthenticationError', (['"""The Plivo Python SDK could not find your auth credentials."""'], {}), "(\n 'The Plivo Python SDK could not find your auth credentials.')\n", (1564, 1631), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((4513, 4549), 'plivo.exceptions.ValidationError', 'ValidationError', (['response_json.error'], {}), '(response_json.error)\n', (4528, 4549), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((4841, 4881), 'plivo.exceptions.AuthenticationError', 'AuthenticationError', (['response_json.error'], {}), '(response_json.error)\n', (4860, 4881), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((5164, 5206), 'plivo.exceptions.ResourceNotFoundError', 'ResourceNotFoundError', (['response_json.error'], {}), '(response_json.error)\n', (5185, 5206), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((5443, 5483), 'plivo.exceptions.InvalidRequestError', 'InvalidRequestError', (['response_json.error'], {}), '(response_json.error)\n', (5462, 5483), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((5787, 5824), 'plivo.exceptions.PlivoServerError', 'PlivoServerError', (['response_json.error'], {}), '(response_json.error)\n', (5803, 5824), False, 'from plivo.exceptions import AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError\n'), ((3927, 3944), 'plivo.base.ResponseObject', 'ResponseObject', (['x'], {}), '(x)\n', (3941, 3944), False, 'from plivo.base import ResponseObject\n')]
|
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def setting(name):
return getattr(settings, name, "")
#@register.filter
#def format_difference(value):
# number = int(value)
# if number > 0:
# return mark_safe('<span style="color: green">+' + str(number) + '</span>')
# elif number < 0:
# return mark_safe('<span style="color: red">' + str(number) + '</span>')
# else:
# return mark_safe(str(number))
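# A minimal usage sketch: once this tag library is registered under an app
# (the library name below is a placeholder), the `setting` tag can expose a
# settings value inside a template:
#
#     {% load <this_tag_library> %}
#     {% setting "DEBUG" %}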
|
[
"django.template.Library"
] |
[((119, 137), 'django.template.Library', 'template.Library', ([], {}), '()\n', (135, 137), False, 'from django import template\n')]
|
# Given a series of input numbers, count the number of times
# the values increase from one to the next.
import pandas as pd
# Part 1
sample = pd.read_csv(".\Day1\sample.txt", header=None, squeeze=True)
input = pd.read_csv(".\Day1\input.txt", header=None, squeeze=True)
#print(type(input))
ans = input.diff(1).apply(lambda x: x > 0).sum()
#print(ans)
# Part 2
#print(sample)
rolling = input.rolling(window=3,min_periods=3,center=True)
print(rolling.sum().dropna().diff(1).apply(lambda x: x > 0).sum())
|
[
"pandas.read_csv"
] |
[((145, 206), 'pandas.read_csv', 'pd.read_csv', (['""".\\\\Day1\\\\sample.txt"""'], {'header': 'None', 'squeeze': '(True)'}), "('.\\\\Day1\\\\sample.txt', header=None, squeeze=True)\n", (156, 206), True, 'import pandas as pd\n'), ((214, 274), 'pandas.read_csv', 'pd.read_csv', (['""".\\\\Day1\\\\input.txt"""'], {'header': 'None', 'squeeze': '(True)'}), "('.\\\\Day1\\\\input.txt', header=None, squeeze=True)\n", (225, 274), True, 'import pandas as pd\n')]
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): <NAME>, <NAME> #
# E-mail: <EMAIL> #
# Website: www.continualai.org #
################################################################################
import GPUtil
from threading import Thread
import time
import warnings
from typing import Optional, TYPE_CHECKING, List
from avalanche.evaluation import Metric, PluginMetric
from avalanche.evaluation.metric_results import MetricValue, MetricResult
from avalanche.evaluation.metric_utils import get_metric_name, \
phase_and_task, stream_type
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class MaxGPU(Metric[float]):
"""
The standalone GPU usage metric.
    Important: this metric approximates the real maximum GPU percentage
    usage since it samples the GPU values at discrete time intervals.
    Instances of this metric keep the maximum GPU usage percentage detected.
    The `start_thread` method starts the usage tracking.
    The `stop_thread` method stops the tracking.
    The result, obtained using the `result` method, is the maximum usage as a percentage.
    The reset method will bring the metric to its initial state. By default
    this metric in its initial state will return a usage value of 0.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
self.every = every
self.gpu_id = gpu_id
n_gpus = len(GPUtil.getGPUs())
if n_gpus == 0:
warnings.warn("Your system has no GPU!")
self.gpu_id = None
elif gpu_id < 0:
warnings.warn("GPU metric called with negative GPU id."
"GPU logging disabled")
self.gpu_id = None
else:
if gpu_id >= n_gpus:
warnings.warn(f"GPU {gpu_id} not found. Using GPU 0.")
self.gpu_id = 0
self.thread = None
"""
Thread executing GPU monitoring code
"""
self.stop_f = False
"""
Flag to stop the thread
"""
self.max_usage = 0
"""
Main metric result. Max GPU usage.
"""
def _f(self):
"""
Until a stop signal is encountered,
this function monitors each `every` seconds
the maximum amount of GPU used by the process
"""
start_time = time.monotonic()
while not self.stop_f:
# GPU percentage
gpu_perc = GPUtil.getGPUs()[self.gpu_id].load * 100
if gpu_perc > self.max_usage:
self.max_usage = gpu_perc
time.sleep(self.every - ((time.monotonic() - start_time)
% self.every))
def start_thread(self):
if self.gpu_id:
assert not self.thread, "Trying to start thread " \
"without joining the previous."
self.thread = Thread(target=self._f, daemon=True)
self.thread.start()
def stop_thread(self):
if self.thread:
self.stop_f = True
self.thread.join()
self.stop_f = False
self.thread = None
def reset(self) -> None:
"""
Resets the metric.
:return: None.
"""
self.max_usage = 0
def result(self) -> Optional[float]:
"""
Returns the max GPU percentage value.
        :return: The maximum GPU usage as a percentage in the range [0, 100].
"""
return self.max_usage
class MinibatchMaxGPU(PluginMetric[float]):
"""
The Minibatch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Minibatch Max GPU metric
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_training(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_training_iteration(self, strategy: 'BaseStrategy') -> None:
self.reset()
def after_training_iteration(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_training(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_MB"
class EpochMaxGPU(PluginMetric[float]):
"""
The Epoch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the epoch Max GPU metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_training(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_training_epoch(self, strategy) -> MetricResult:
self.reset()
def after_training_epoch(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_training(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Epoch"
class ExperienceMaxGPU(PluginMetric[float]):
"""
The Experience Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Experience Max GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_eval(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_eval_exp(self, strategy) -> MetricResult:
self.reset()
def after_eval_exp(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_eval(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Experience"
class StreamMaxGPU(PluginMetric[float]):
"""
The Stream Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Stream Max GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_eval(self, strategy) -> MetricResult:
self.reset()
self._gpu.start_thread()
def after_eval(self, strategy: 'BaseStrategy') \
-> MetricResult:
packed = self._package_result(strategy)
self._gpu.stop_thread()
return packed
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
phase_name, _ = phase_and_task(strategy)
stream = stream_type(strategy.experience)
metric_name = '{}/{}_phase/{}_stream' \
.format(str(self),
phase_name,
stream)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Stream"
def gpu_usage_metrics(gpu_id, every=0.5, minibatch=False, epoch=False,
experience=False, stream=False) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
:param minibatch: If True, will return a metric able to log the minibatch
max GPU usage.
:param epoch: If True, will return a metric able to log the epoch
max GPU usage.
:param experience: If True, will return a metric able to log the experience
max GPU usage.
:param stream: If True, will return a metric able to log the evaluation
max stream GPU usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxGPU(gpu_id, every))
if epoch:
metrics.append(EpochMaxGPU(gpu_id, every))
if experience:
metrics.append(ExperienceMaxGPU(gpu_id, every))
if stream:
metrics.append(StreamMaxGPU(gpu_id, every))
return metrics
__all__ = [
'MaxGPU',
'MinibatchMaxGPU',
'EpochMaxGPU',
'ExperienceMaxGPU',
'StreamMaxGPU',
'gpu_usage_metrics'
]
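# A minimal usage sketch (assumes the usual Avalanche layout where
# `EvaluationPlugin` lives in `avalanche.training.plugins`; loggers and the
# strategy itself are omitted):
#
#     from avalanche.training.plugins import EvaluationPlugin
#
#     eval_plugin = EvaluationPlugin(
#         *gpu_usage_metrics(0, every=0.5, epoch=True, experience=True)
#     )
#     # pass it to a strategy as `evaluator=eval_plugin`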
|
[
"GPUtil.getGPUs",
"time.monotonic",
"avalanche.evaluation.metric_utils.phase_and_task",
"avalanche.evaluation.metric_results.MetricValue",
"warnings.warn",
"threading.Thread",
"avalanche.evaluation.metric_utils.get_metric_name",
"avalanche.evaluation.metric_utils.stream_type"
] |
[((3087, 3103), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3101, 3103), False, 'import time\n'), ((5440, 5471), 'avalanche.evaluation.metric_utils.get_metric_name', 'get_metric_name', (['self', 'strategy'], {}), '(self, strategy)\n', (5455, 5471), False, 'from avalanche.evaluation.metric_utils import get_metric_name, phase_and_task, stream_type\n'), ((6844, 6875), 'avalanche.evaluation.metric_utils.get_metric_name', 'get_metric_name', (['self', 'strategy'], {}), '(self, strategy)\n', (6859, 6875), False, 'from avalanche.evaluation.metric_utils import get_metric_name, phase_and_task, stream_type\n'), ((8244, 8296), 'avalanche.evaluation.metric_utils.get_metric_name', 'get_metric_name', (['self', 'strategy'], {'add_experience': '(True)'}), '(self, strategy, add_experience=True)\n', (8259, 8296), False, 'from avalanche.evaluation.metric_utils import get_metric_name, phase_and_task, stream_type\n'), ((9543, 9567), 'avalanche.evaluation.metric_utils.phase_and_task', 'phase_and_task', (['strategy'], {}), '(strategy)\n', (9557, 9567), False, 'from avalanche.evaluation.metric_utils import get_metric_name, phase_and_task, stream_type\n'), ((9585, 9617), 'avalanche.evaluation.metric_utils.stream_type', 'stream_type', (['strategy.experience'], {}), '(strategy.experience)\n', (9596, 9617), False, 'from avalanche.evaluation.metric_utils import get_metric_name, phase_and_task, stream_type\n'), ((2146, 2162), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (2160, 2162), False, 'import GPUtil\n'), ((2200, 2240), 'warnings.warn', 'warnings.warn', (['"""Your system has no GPU!"""'], {}), "('Your system has no GPU!')\n", (2213, 2240), False, 'import warnings\n'), ((3644, 3679), 'threading.Thread', 'Thread', ([], {'target': 'self._f', 'daemon': '(True)'}), '(target=self._f, daemon=True)\n', (3650, 3679), False, 'from threading import Thread\n'), ((5541, 5599), 'avalanche.evaluation.metric_results.MetricValue', 'MetricValue', (['self', 'metric_name', 'gpu_usage', 'plot_x_position'], {}), '(self, metric_name, gpu_usage, plot_x_position)\n', (5552, 5599), False, 'from avalanche.evaluation.metric_results import MetricValue, MetricResult\n'), ((6945, 7003), 'avalanche.evaluation.metric_results.MetricValue', 'MetricValue', (['self', 'metric_name', 'gpu_usage', 'plot_x_position'], {}), '(self, metric_name, gpu_usage, plot_x_position)\n', (6956, 7003), False, 'from avalanche.evaluation.metric_results import MetricValue, MetricResult\n'), ((8366, 8424), 'avalanche.evaluation.metric_results.MetricValue', 'MetricValue', (['self', 'metric_name', 'gpu_usage', 'plot_x_position'], {}), '(self, metric_name, gpu_usage, plot_x_position)\n', (8377, 8424), False, 'from avalanche.evaluation.metric_results import MetricValue, MetricResult\n'), ((9826, 9884), 'avalanche.evaluation.metric_results.MetricValue', 'MetricValue', (['self', 'metric_name', 'gpu_usage', 'plot_x_position'], {}), '(self, metric_name, gpu_usage, plot_x_position)\n', (9837, 9884), False, 'from avalanche.evaluation.metric_results import MetricValue, MetricResult\n'), ((2309, 2385), 'warnings.warn', 'warnings.warn', (['"""GPU metric called with negative GPU id.GPU logging disabled"""'], {}), "('GPU metric called with negative GPU id.GPU logging disabled')\n", (2322, 2385), False, 'import warnings\n'), ((2509, 2563), 'warnings.warn', 'warnings.warn', (['f"""GPU {gpu_id} not found. Using GPU 0."""'], {}), "(f'GPU {gpu_id} not found. 
Using GPU 0.')\n", (2522, 2563), False, 'import warnings\n'), ((3187, 3203), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (3201, 3203), False, 'import GPUtil\n'), ((3350, 3366), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3364, 3366), False, 'import time\n')]
|
import base64
import datetime
import io
import json
import os
import requests
from collections import namedtuple
from urllib.parse import urlparse
import faust
import numpy as np
import keras_preprocessing.image as keras_img
from avro import schema
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from biovolume import calc_biovolume
from blob import Blob, BlobConfig
config_path = os.environ.get('IFCB_STREAM_APP_CONFIG', 'config.json')
with open(config_path) as config_file:
config = json.load(config_file)
Stats = namedtuple(
'Stats',
['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob', 'classification_time', 'biovolume', 'carbon', 'hab']
)
ClassifierStats = namedtuple(
'ClassifierStats',
['sample_name', 'prob', 'classifier', 'classification_time']
)
schema_config = {
'url': config['schema.registry.url'],
'ssl.ca.location': None
}
# need to use CachedSchemaRegistryClient to get schema
# - need to copy config because it is consumed when used in CachedSchemaRegistryClient
schema_config_copy = schema_config.copy()
cached_schema_client = CachedSchemaRegistryClient(schema_config)
key_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-key')[1])
value_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-value')[1])
key_schema = avro.loads(key_schema)
value_schema = avro.loads(value_schema)
producer = AvroProducer({
'bootstrap.servers': config['bootstrap.servers'],
'schema.registry.url': config['schema.registry.url']
},
default_key_schema=key_schema,
default_value_schema=value_schema
)
app = faust.App(
config['app_name'],
broker=config['broker'],
topic_partitions=config['topic_partitions'],
store='rocksdb://',
consumer_auto_offset_reset='earliest',
version=1
)
image_topic = app.topic(config['image_topic'])
stats_topic = app.topic(config['stats_topic'])
classifier_stats_table = app.Table('ifcb-classifier-stats', default=ClassifierStats)
diatoms = config['diatoms']
class_names = config['class_names']
hab_species = config['hab_species']
def publish_stats(feature_key, image, classifier_stats, blob_config=BlobConfig()):
"""Calculate biovolume, carbon, hab, and publish to Kafka"""
# calculate biovolume
# - scale biovolume for 3d (from ifcb-analysis)
blob = Blob(image, blob_config)
biovolume = calc_biovolume(blob)
mu = 1/3.4
biovolume = biovolume * mu ** 3
carbon = calc_carbon(classifier_stats[0], biovolume)
hab = classifier_stats[0] in hab_species
time, ifcb_id, roi = feature_key.split('_')
roi = int(roi)
timestamp = int(datetime.datetime.strptime(time[1:], '%Y%m%dT%H%M%S').timestamp())
stats = Stats(
timestamp,
ifcb_id,
roi,
classifier_stats[0],
classifier_stats[2],
classifier_stats[1],
classifier_stats[3],
biovolume,
carbon,
hab
)
# send to topic with Avro schema
producer.poll(0)
producer.produce(
topic=config['stats_topic'],
key={
'pid': f"{time}_{ifcb_id}",
'roi': int(roi)
},
value=stats._asdict()
)
producer.flush()
@app.agent(image_topic)
async def classify(images, url=config['tensorflow_url'], target_size=(224, 224)):
async for image in images:
# decode binary blob to png file then resize and normalize
image_str = base64.b64decode(image['image'])
image_file = io.BytesIO(image_str)
img = keras_img.img_to_array(
keras_img.load_img(image_file, target_size=target_size)
)
img /= 255
# create payload and send to TF RESTful API
headers = {"content-type": "application/json"}
data = json.dumps({'instances': [img.tolist()]})
result = requests.post(url, headers=headers, data=data)
# save the probabilities for each class (1d ndarray)
probs = result.json()['predictions'][0][:]
# feature_key is roi
time = datetime.datetime.fromtimestamp(image['datetime'])
feature_key = f"{time:D%Y%m%dT%H%M%S}_{image['ifcb_id']}_{image['roi']:05}"
print(f'processing {feature_key}')
# update table if current prob is greater than what is already in the table
prob = np.nanmax(probs)
if feature_key not in classifier_stats_table or prob > classifier_stats_table[feature_key].prob:
name = class_names[np.argmax(probs)]
classifier, version = get_classifier(url)
classifier_version = f'{classifier}:{version}'
classifier_stats_table[feature_key] = ClassifierStats(
name,
prob,
classifier_version,
int(datetime.datetime.utcnow().timestamp())
)
# send
publish_stats(feature_key, image_str, classifier_stats_table[feature_key])
def get_classifier(url):
"""Given TF style url, return name and version"""
parse_results = urlparse(url)
_, version, _, name_raw = parse_results.path.split('/')
name = name_raw.split(':')[0]
return (name, version)
def calc_carbon(english_name, scaled_biovolume, diatom_list=diatoms):
"""Given volume in u3/cell return carbon in pg C/cell.
$log_10(C) = log(a) + b \cdot log_10(V)$
"""
if english_name in diatom_list:
carbon = 10**(-0.665 + 0.939*np.log10(scaled_biovolume))
else:
carbon = 10**(-0.993 + 0.881*np.log10(scaled_biovolume))
return carbon
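# Worked example (illustrative numbers only): for a diatom with a scaled
# biovolume of 1000 um^3, carbon = 10**(-0.665 + 0.939*log10(1000)), which is
# about 142 pg C/cell; the non-diatom coefficients (-0.993, 0.881) give about
# 45 pg C/cell for the same biovolume.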
if __name__ == '__main__':
app.main()
|
[
"requests.post",
"numpy.log10",
"confluent_kafka.avro.loads",
"faust.App",
"keras_preprocessing.image.load_img",
"io.BytesIO",
"confluent_kafka.avro.cached_schema_registry_client.CachedSchemaRegistryClient",
"blob.BlobConfig",
"blob.Blob",
"numpy.nanmax",
"collections.namedtuple",
"biovolume.calc_biovolume",
"confluent_kafka.avro.AvroProducer",
"numpy.argmax",
"datetime.datetime.fromtimestamp",
"urllib.parse.urlparse",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"os.environ.get",
"base64.b64decode",
"json.load"
] |
[((637, 692), 'os.environ.get', 'os.environ.get', (['"""IFCB_STREAM_APP_CONFIG"""', '"""config.json"""'], {}), "('IFCB_STREAM_APP_CONFIG', 'config.json')\n", (651, 692), False, 'import os\n'), ((777, 911), 'collections.namedtuple', 'namedtuple', (['"""Stats"""', "['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob',\n 'classification_time', 'biovolume', 'carbon', 'hab']"], {}), "('Stats', ['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob',\n 'classification_time', 'biovolume', 'carbon', 'hab'])\n", (787, 911), False, 'from collections import namedtuple\n'), ((936, 1031), 'collections.namedtuple', 'namedtuple', (['"""ClassifierStats"""', "['sample_name', 'prob', 'classifier', 'classification_time']"], {}), "('ClassifierStats', ['sample_name', 'prob', 'classifier',\n 'classification_time'])\n", (946, 1031), False, 'from collections import namedtuple\n'), ((1336, 1377), 'confluent_kafka.avro.cached_schema_registry_client.CachedSchemaRegistryClient', 'CachedSchemaRegistryClient', (['schema_config'], {}), '(schema_config)\n', (1362, 1377), False, 'from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient\n'), ((1552, 1574), 'confluent_kafka.avro.loads', 'avro.loads', (['key_schema'], {}), '(key_schema)\n', (1562, 1574), False, 'from confluent_kafka import avro\n'), ((1590, 1614), 'confluent_kafka.avro.loads', 'avro.loads', (['value_schema'], {}), '(value_schema)\n', (1600, 1614), False, 'from confluent_kafka import avro\n'), ((1626, 1818), 'confluent_kafka.avro.AvroProducer', 'AvroProducer', (["{'bootstrap.servers': config['bootstrap.servers'], 'schema.registry.url':\n config['schema.registry.url']}"], {'default_key_schema': 'key_schema', 'default_value_schema': 'value_schema'}), "({'bootstrap.servers': config['bootstrap.servers'],\n 'schema.registry.url': config['schema.registry.url']},\n default_key_schema=key_schema, default_value_schema=value_schema)\n", (1638, 1818), False, 'from confluent_kafka.avro import AvroProducer\n'), ((1842, 2020), 'faust.App', 'faust.App', (["config['app_name']"], {'broker': "config['broker']", 'topic_partitions': "config['topic_partitions']", 'store': '"""rocksdb://"""', 'consumer_auto_offset_reset': '"""earliest"""', 'version': '(1)'}), "(config['app_name'], broker=config['broker'], topic_partitions=\n config['topic_partitions'], store='rocksdb://',\n consumer_auto_offset_reset='earliest', version=1)\n", (1851, 2020), False, 'import faust\n'), ((745, 767), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (754, 767), False, 'import json\n'), ((2389, 2401), 'blob.BlobConfig', 'BlobConfig', ([], {}), '()\n', (2399, 2401), False, 'from blob import Blob, BlobConfig\n'), ((2558, 2582), 'blob.Blob', 'Blob', (['image', 'blob_config'], {}), '(image, blob_config)\n', (2562, 2582), False, 'from blob import Blob, BlobConfig\n'), ((2599, 2619), 'biovolume.calc_biovolume', 'calc_biovolume', (['blob'], {}), '(blob)\n', (2613, 2619), False, 'from biovolume import calc_biovolume\n'), ((5241, 5254), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (5249, 5254), False, 'from urllib.parse import urlparse\n'), ((3659, 3691), 'base64.b64decode', 'base64.b64decode', (["image['image']"], {}), "(image['image'])\n", (3675, 3691), False, 'import base64\n'), ((3713, 3734), 'io.BytesIO', 'io.BytesIO', (['image_str'], {}), '(image_str)\n', (3723, 3734), False, 'import io\n'), ((4052, 4098), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'data'}), '(url, headers=headers, data=data)\n', (4065, 
4098), False, 'import requests\n'), ((4257, 4307), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["image['datetime']"], {}), "(image['datetime'])\n", (4288, 4307), False, 'import datetime\n'), ((4536, 4552), 'numpy.nanmax', 'np.nanmax', (['probs'], {}), '(probs)\n', (4545, 4552), True, 'import numpy as np\n'), ((3785, 3840), 'keras_preprocessing.image.load_img', 'keras_img.load_img', (['image_file'], {'target_size': 'target_size'}), '(image_file, target_size=target_size)\n', (3803, 3840), True, 'import keras_preprocessing.image as keras_img\n'), ((2861, 2914), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['time[1:]', '"""%Y%m%dT%H%M%S"""'], {}), "(time[1:], '%Y%m%dT%H%M%S')\n", (2887, 2914), False, 'import datetime\n'), ((4689, 4705), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (4698, 4705), True, 'import numpy as np\n'), ((5635, 5661), 'numpy.log10', 'np.log10', (['scaled_biovolume'], {}), '(scaled_biovolume)\n', (5643, 5661), True, 'import numpy as np\n'), ((5710, 5736), 'numpy.log10', 'np.log10', (['scaled_biovolume'], {}), '(scaled_biovolume)\n', (5718, 5736), True, 'import numpy as np\n'), ((4987, 5013), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5011, 5013), False, 'import datetime\n')]
|
import discord
from discord.ext import commands
from discord.utils import get
class c260(commands.Cog, name="c260"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Yikilth_Lair_of_the_Abyssals', aliases=['c260', 'Abyssal_11'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Yikilth, Lair of the Abyssals',
color=0x1D9E74)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360326.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Abyssal)', inline=True)
embed.add_field(name='Type', value='Spell/Field', inline=False)
embed.add_field(name='Card Effect', value='When this card is activated: Add 1 "Abyssal" monster from your Deck to your hand. Once per turn, when your opponent activates a card or effect that targets and/or would destroy a Set monster(s) you control: You can flip 1 Set monster you control into face-up Attack or Defense Position; negate the activation. You can only activate 1 "Yikilth, Lair of the Abyssals" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c260(bot))
|
[
"discord.Embed",
"discord.ext.commands.command"
] |
[((190, 279), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""Yikilth_Lair_of_the_Abyssals"""', 'aliases': "['c260', 'Abyssal_11']"}), "(name='Yikilth_Lair_of_the_Abyssals', aliases=['c260',\n 'Abyssal_11'])\n", (206, 279), False, 'from discord.ext import commands\n'), ((332, 399), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Yikilth, Lair of the Abyssals"""', 'color': '(1941108)'}), "(title='Yikilth, Lair of the Abyssals', color=1941108)\n", (345, 399), False, 'import discord\n')]
|
"""
This module tests utils
"""
from unittest.mock import patch, MagicMock
from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider
from superset_patchup.oauth import CustomSecurityManager
class TestUtils:
"""
Class to test the utils module
"""
@patch("superset_patchup.utils.request")
def test_is_safe_url(self, mock):
"""
Test that only urls from the same domain are set as safe
by the is_safe_url function
"""
mock.host_url = "https://example.com"
assert is_safe_url("https://example.com") is True
assert is_safe_url("https://google.com") is False
@patch("superset_patchup.utils.os.getenv")
def test_get_complex_env_var_default(self, mock):
"""
Test that the get_complex_env_var function returns the default value
when the variable is not set
"""
mock.return_value = None
default_params = {"bean": "bag"}
params = get_complex_env_var("PARAMS", default_params)
# assert that the value returned is a dictionary
assert isinstance(params, dict)
# assert that the value returned is the default
assert params == default_params
@patch("superset_patchup.utils.os.getenv")
def test_get_complex_env_var(self, mock):
"""
Test that the get_complex_env_var function is able to return a
complex variable
"""
default_params = {"bean": "bag"}
# dict variable
params_value = {"spring": "bean"}
mock.return_value = str(params_value)
params = get_complex_env_var("PARAMS", default_params)
assert isinstance(params, dict)
assert params == params_value
# bool variable
mock.return_value = "True"
bool_params = get_complex_env_var("PARAMS", default_params)
assert isinstance(bool_params, bool)
assert bool_params is True
def test_case_insensitivity_for_provider(self):
"""
        Test that provider information from the user can be case-insensitive
        relative to the standard strings it is checked against
"""
assert is_valid_provider("opensrp", "OpenSRP")
assert is_valid_provider("OnaData", 'onadata')
assert is_valid_provider("OpenlMis", "openlmis")
assert not is_valid_provider("oensrp", "OpenSrp")
|
[
"superset_patchup.utils.is_safe_url",
"superset_patchup.utils.is_valid_provider",
"unittest.mock.patch",
"superset_patchup.utils.get_complex_env_var"
] |
[((296, 335), 'unittest.mock.patch', 'patch', (['"""superset_patchup.utils.request"""'], {}), "('superset_patchup.utils.request')\n", (301, 335), False, 'from unittest.mock import patch, MagicMock\n'), ((667, 708), 'unittest.mock.patch', 'patch', (['"""superset_patchup.utils.os.getenv"""'], {}), "('superset_patchup.utils.os.getenv')\n", (672, 708), False, 'from unittest.mock import patch, MagicMock\n'), ((1237, 1278), 'unittest.mock.patch', 'patch', (['"""superset_patchup.utils.os.getenv"""'], {}), "('superset_patchup.utils.os.getenv')\n", (1242, 1278), False, 'from unittest.mock import patch, MagicMock\n'), ((992, 1037), 'superset_patchup.utils.get_complex_env_var', 'get_complex_env_var', (['"""PARAMS"""', 'default_params'], {}), "('PARAMS', default_params)\n", (1011, 1037), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((1616, 1661), 'superset_patchup.utils.get_complex_env_var', 'get_complex_env_var', (['"""PARAMS"""', 'default_params'], {}), "('PARAMS', default_params)\n", (1635, 1661), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((1822, 1867), 'superset_patchup.utils.get_complex_env_var', 'get_complex_env_var', (['"""PARAMS"""', 'default_params'], {}), "('PARAMS', default_params)\n", (1841, 1867), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((2183, 2222), 'superset_patchup.utils.is_valid_provider', 'is_valid_provider', (['"""opensrp"""', '"""OpenSRP"""'], {}), "('opensrp', 'OpenSRP')\n", (2200, 2222), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((2238, 2277), 'superset_patchup.utils.is_valid_provider', 'is_valid_provider', (['"""OnaData"""', '"""onadata"""'], {}), "('OnaData', 'onadata')\n", (2255, 2277), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((2293, 2334), 'superset_patchup.utils.is_valid_provider', 'is_valid_provider', (['"""OpenlMis"""', '"""openlmis"""'], {}), "('OpenlMis', 'openlmis')\n", (2310, 2334), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((560, 594), 'superset_patchup.utils.is_safe_url', 'is_safe_url', (['"""https://example.com"""'], {}), "('https://example.com')\n", (571, 594), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((618, 651), 'superset_patchup.utils.is_safe_url', 'is_safe_url', (['"""https://google.com"""'], {}), "('https://google.com')\n", (629, 651), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n'), ((2354, 2392), 'superset_patchup.utils.is_valid_provider', 'is_valid_provider', (['"""oensrp"""', '"""OpenSrp"""'], {}), "('oensrp', 'OpenSrp')\n", (2371, 2392), False, 'from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider\n')]
|
# Generated by Django 2.2.5 on 2019-10-28 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20191028_1802'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='registered_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='date_registered'),
),
]
|
[
"django.db.models.DateTimeField"
] |
[((342, 413), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""date_registered"""'}), "(auto_now_add=True, verbose_name='date_registered')\n", (362, 413), False, 'from django.db import migrations, models\n')]
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.kmeans_utils import compute_logits
from fewshot.models.model import Model
from fewshot.models.refine_model import RefineModel
from fewshot.models.basic_model_VAT import BasicModelVAT
from fewshot.models.model_factory import RegisterModel
from fewshot.models.nnlib import (concat, weight_variable)
from fewshot.utils import logger
from fewshot.utils.debug import debug_identity
from fewshot.models.SSL_utils import *
l2_norm = lambda t: tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))
log = logger.get()
@RegisterModel("basic-VAT-ENT")
class BasicModelVAT_ENT(BasicModelVAT):
def get_train_op(self, logits, y_test):
loss, train_op = BasicModelVAT.get_train_op(self, logits, y_test)
config = self.config
ENT_weight = config.ENT_weight
VAT_ENT_step_size = config.VAT_ENT_step_size
logits = self._unlabel_logits
s = tf.shape(logits)
s = s[0]
p = tf.stop_gradient(self.h_unlabel)
affinity_matrix = compute_logits(p, p) - (tf.eye(s, dtype=tf.float32) * 1000.0)
# logits = tf.Print(logits, [tf.shape(point_logits)])
ENT_loss = walking_penalty(logits, affinity_matrix)
loss += ENT_weight * ENT_loss
ENT_opt = tf.train.AdamOptimizer(VAT_ENT_step_size * self.learn_rate, name="Entropy-optimizer")
ENT_grads_and_vars = ENT_opt.compute_gradients(loss)
train_op = ENT_opt.apply_gradients(ENT_grads_and_vars)
for gradient, variable in ENT_grads_and_vars:
if gradient is None:
gradient = tf.constant(0.0)
self.adv_summaries.append(tf.summary.scalar("ENT/gradients/" + variable.name, l2_norm(gradient), family="Grads"))
self.adv_summaries.append(tf.summary.histogram("ENT/gradients/" + variable.name, gradient, family="Grads"))
self.summaries.append(tf.summary.scalar('entropy loss', ENT_loss))
return loss, train_op
|
[
"tensorflow.train.AdamOptimizer",
"tensorflow.eye",
"tensorflow.shape",
"tensorflow.pow",
"fewshot.models.basic_model_VAT.BasicModelVAT.get_train_op",
"fewshot.utils.logger.get",
"tensorflow.summary.scalar",
"tensorflow.stop_gradient",
"tensorflow.summary.histogram",
"tensorflow.constant",
"fewshot.models.kmeans_utils.compute_logits",
"fewshot.models.model_factory.RegisterModel"
] |
[((656, 668), 'fewshot.utils.logger.get', 'logger.get', ([], {}), '()\n', (666, 668), False, 'from fewshot.utils import logger\n'), ((672, 702), 'fewshot.models.model_factory.RegisterModel', 'RegisterModel', (['"""basic-VAT-ENT"""'], {}), "('basic-VAT-ENT')\n", (685, 702), False, 'from fewshot.models.model_factory import RegisterModel\n'), ((803, 851), 'fewshot.models.basic_model_VAT.BasicModelVAT.get_train_op', 'BasicModelVAT.get_train_op', (['self', 'logits', 'y_test'], {}), '(self, logits, y_test)\n', (829, 851), False, 'from fewshot.models.basic_model_VAT import BasicModelVAT\n'), ((995, 1011), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (1003, 1011), True, 'import tensorflow as tf\n'), ((1029, 1061), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.h_unlabel'], {}), '(self.h_unlabel)\n', (1045, 1061), True, 'import tensorflow as tf\n'), ((1300, 1390), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(VAT_ENT_step_size * self.learn_rate)'], {'name': '"""Entropy-optimizer"""'}), "(VAT_ENT_step_size * self.learn_rate, name=\n 'Entropy-optimizer')\n", (1322, 1390), True, 'import tensorflow as tf\n'), ((635, 647), 'tensorflow.pow', 'tf.pow', (['t', '(2)'], {}), '(t, 2)\n', (641, 647), True, 'import tensorflow as tf\n'), ((1082, 1102), 'fewshot.models.kmeans_utils.compute_logits', 'compute_logits', (['p', 'p'], {}), '(p, p)\n', (1096, 1102), False, 'from fewshot.models.kmeans_utils import compute_logits\n'), ((1856, 1899), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""entropy loss"""', 'ENT_loss'], {}), "('entropy loss', ENT_loss)\n", (1873, 1899), True, 'import tensorflow as tf\n'), ((1106, 1133), 'tensorflow.eye', 'tf.eye', (['s'], {'dtype': 'tf.float32'}), '(s, dtype=tf.float32)\n', (1112, 1133), True, 'import tensorflow as tf\n'), ((1586, 1602), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (1597, 1602), True, 'import tensorflow as tf\n'), ((1749, 1834), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('ENT/gradients/' + variable.name)", 'gradient'], {'family': '"""Grads"""'}), "('ENT/gradients/' + variable.name, gradient, family='Grads'\n )\n", (1769, 1834), True, 'import tensorflow as tf\n')]
|
import unittest
import os
import pathlib
import h5py
from desc.input_reader import InputReader
from desc.equilibrium_io import hdf5Writer, hdf5Reader
from desc.configuration import Configuration, Equilibrium
#from desc.input_output import read_input
#class TestIO(unittest.TestCase):
# """tests for input/output functions"""
#
# def test_min_input(self):
# dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, 'MIN_INPUT')
# inputs = read_input(filename)
#
# self.assertEqual(len(inputs), 26)
class TestInputReader(unittest.TestCase):
def setUp(self):
self.argv0 = []
self.argv1 = ['nonexistant_input_file']
self.argv2 = ['./tests/MIN_INPUT']
def test_no_input_file(self):
with self.assertRaises(NameError):
ir = InputReader(cl_args=self.argv0)
def test_nonexistant_input_file(self):
with self.assertRaises(FileNotFoundError):
ir = InputReader(cl_args=self.argv1)
def test_min_input(self):
ir = InputReader(cl_args=self.argv2)
#self.assertEqual(ir.args.prog, 'DESC', 'Program is incorrect.')
self.assertEqual(ir.args.input_file[0], self.argv2[0],
'Input file name does not match')
#self.assertEqual(ir.output_path, self.argv2[0] + '.output',
# 'Default output file does not match.')
self.assertEqual(ir.input_path,
str(pathlib.Path('./'+self.argv2[0]).resolve()),
'Path to input file is incorrect.')
#Test defaults
self.assertFalse(ir.args.plot, 'plot is not default False')
self.assertFalse(ir.args.quiet, 'quiet is not default False')
self.assertFalse(ir.args.verbose, 'verbose is not default False')
#self.assertEqual(ir.args.vmec_path, '', "vmec path is not default ''")
#self.assertFalse(ir.args.gpuID, 'gpu argument was given')
self.assertFalse(ir.args.numpy, 'numpy is not default False')
self.assertEqual(os.environ['DESC_USE_NUMPY'], '', 'numpy environment '
'variable incorrect with default argument')
self.assertFalse(ir.args.version, 'version is not default False')
self.assertEqual(len(ir.inputs), 28, 'number of inputs does not match '
'number expected in MIN_INPUT')
# test equality of arguments
def test_np_environ(self):
argv = self.argv2 + ['--numpy']
ir = InputReader(cl_args=argv)
self.assertEqual(os.environ['DESC_USE_NUMPY'], 'True', 'numpy '
'environment variable incorrect on use')
def test_quiet_verbose(self):
ir = InputReader(self.argv2)
self.assertEqual(ir.inputs['verbose'], 1, "value of inputs['verbose'] "
"incorrect on no arguments")
argv = self.argv2 + ['-v']
ir = InputReader(argv)
self.assertEqual(ir.inputs['verbose'], 2, "value of inputs['verbose'] "
"incorrect on verbose argument")
argv.append('-q')
ir = InputReader(argv)
self.assertEqual(ir.inputs['verbose'], 0, "value of inputs['verbose'] "
"incorrect on quiet argument")
def test_vmec_to_desc_input(self):
pass
class MockObject:
def __init__(self):
self._save_attrs_ = ['a', 'b', 'c']
class Testhdf5Writer(unittest.TestCase):
def setUp(self):
self.filename = 'writer_test_file'
self.file_mode = 'w'
def test_given_filename(self):
writer = hdf5Writer(self.filename, self.file_mode)
self.assertFalse(writer.check_type(writer.target))
self.assertTrue(writer.check_type(writer.base))
self.assertTrue(writer._close_base_)
writer.close()
self.assertFalse(writer._close_base_)
def test_given_file(self):
f = h5py.File(self.filename, self.file_mode)
writer = hdf5Writer(f, self.file_mode)
self.assertTrue(writer.check_type(writer.target))
self.assertTrue(writer.check_type(writer.base))
self.assertFalse(writer._close_base_)
#with self.assertWarns(RuntimeWarning):
# writer.close()
self.assertFalse(writer._close_base_)
f.close()
def test_close_on_delete(self):
writer = hdf5Writer(self.filename, self.file_mode)
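        # the first writer still holds the file open, so opening it again should raise OSError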
with self.assertRaises(OSError):
newwriter = hdf5Writer(self.filename, self.file_mode)
del writer
newwriter = hdf5Writer(self.filename, self.file_mode)
del newwriter
def test_write_dict(self):
thedict = {'1':1, '2':2, '3':3}
writer = hdf5Writer(self.filename, self.file_mode)
writer.write_dict(thedict)
writer.write_dict(thedict, where=writer.sub('subgroup'))
with self.assertRaises(SyntaxError):
writer.write_dict(thedict, where='not a writable type')
writer.close()
f = h5py.File(self.filename, 'r')
g = f['subgroup']
for key in thedict.keys():
self.assertTrue(key in f.keys())
self.assertTrue(key in g.keys())
f.close()
def test_write_obj(self):
mo = MockObject()
writer = hdf5Writer(self.filename, self.file_mode)
#writer should throw runtime warning if any save_attrs are undefined
with self.assertWarns(RuntimeWarning):
writer.write_obj(mo)
writer.close()
writer = hdf5Writer(self.filename, self.file_mode)
for name in mo._save_attrs_:
setattr(mo, name, name)
writer.write_obj(mo)
groupname = 'initial'
writer.write_obj(mo, where=writer.sub(groupname))
writer.close()
f = h5py.File(self.filename, 'r')
for key in mo._save_attrs_:
self.assertTrue(key in f.keys())
self.assertTrue(groupname in f.keys())
initial = f[groupname]
for key in mo._save_attrs_:
self.assertTrue(key in initial.keys())
f.close()
class Testhdf5Reader(unittest.TestCase):
def setUp(self):
self.filename = 'reader_test_file'
self.file_mode = 'r'
self.thedict = {'a':'a', 'b':'b', 'c':'c'}
f = h5py.File(self.filename, 'w')
self.subgroup = 'subgroup'
g = f.create_group(self.subgroup)
for key in self.thedict.keys():
f.create_dataset(key, data=self.thedict[key])
g.create_dataset(key, data=self.thedict[key])
f.close()
def test_given_filename(self):
reader = hdf5Reader(self.filename)
self.assertFalse(reader.check_type(reader.target))
self.assertTrue(reader.check_type(reader.base))
self.assertTrue(reader._close_base_)
reader.close()
self.assertFalse(reader._close_base_)
def test_given_file(self):
f = h5py.File(self.filename, self.file_mode)
reader = hdf5Reader(f)
self.assertTrue(reader.check_type(reader.target))
self.assertTrue(reader.check_type(reader.base))
self.assertFalse(reader._close_base_)
#with self.assertWarns(RuntimeWarning):
# reader.close()
self.assertFalse(reader._close_base_)
f.close()
#def test_close_on_delete(self):
# reader = hdf5Reader(self.filename)
# with self.assertRaises(OSError):
# newreader = hdf5Reader(self.filename)
# del reader
# newreader = hdf5Reader(self.filename)
# del newreader
def test_read_dict(self):
reader = hdf5Reader(self.filename)
newdict = {}
newsubdict = {}
otherdict = {}
reader.read_dict(newdict)
reader.read_dict(newsubdict, where=reader.sub(self.subgroup))
with self.assertRaises(SyntaxError):
reader.read_dict(otherdict, where='not a readable type')
reader.close()
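        # h5py may return string datasets as bytes; decode them before comparing with the original dict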
if type(newdict['a']) is bytes:
for key in newdict.keys():
newdict[key] = newdict[key].decode('ascii')
for key in newsubdict.keys():
newsubdict[key] = newsubdict[key].decode('ascii')
self.assertTrue(self.thedict == newdict)
self.assertTrue(self.thedict == newsubdict)
def test_read_obj(self):
mo = MockObject()
reader = hdf5Reader(self.filename)
reader.read_obj(mo)
mo._save_attrs_ += '4'
with self.assertWarns(RuntimeWarning):
reader.read_obj(mo)
del mo._save_attrs_[-1]
submo = MockObject()
reader.read_obj(submo, where=reader.sub(self.subgroup))
for key in mo._save_attrs_:
self.assertTrue(hasattr(mo, key))
self.assertTrue(hasattr(submo, key))
def test_load_configuration(self):
pass
def test_load_equilibrium(self):
pass
|
[
"desc.input_reader.InputReader",
"pathlib.Path",
"desc.equilibrium_io.hdf5Writer",
"h5py.File",
"desc.equilibrium_io.hdf5Reader"
] |
[((1042, 1073), 'desc.input_reader.InputReader', 'InputReader', ([], {'cl_args': 'self.argv2'}), '(cl_args=self.argv2)\n', (1053, 1073), False, 'from desc.input_reader import InputReader\n'), ((2451, 2476), 'desc.input_reader.InputReader', 'InputReader', ([], {'cl_args': 'argv'}), '(cl_args=argv)\n', (2462, 2476), False, 'from desc.input_reader import InputReader\n'), ((2650, 2673), 'desc.input_reader.InputReader', 'InputReader', (['self.argv2'], {}), '(self.argv2)\n', (2661, 2673), False, 'from desc.input_reader import InputReader\n'), ((2843, 2860), 'desc.input_reader.InputReader', 'InputReader', (['argv'], {}), '(argv)\n', (2854, 2860), False, 'from desc.input_reader import InputReader\n'), ((3025, 3042), 'desc.input_reader.InputReader', 'InputReader', (['argv'], {}), '(argv)\n', (3036, 3042), False, 'from desc.input_reader import InputReader\n'), ((3495, 3536), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (3505, 3536), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((3810, 3850), 'h5py.File', 'h5py.File', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (3819, 3850), False, 'import h5py\n'), ((3868, 3897), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['f', 'self.file_mode'], {}), '(f, self.file_mode)\n', (3878, 3897), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((4252, 4293), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (4262, 4293), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((4440, 4481), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (4450, 4481), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((4593, 4634), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (4603, 4634), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((4883, 4912), 'h5py.File', 'h5py.File', (['self.filename', '"""r"""'], {}), "(self.filename, 'r')\n", (4892, 4912), False, 'import h5py\n'), ((5156, 5197), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (5166, 5197), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((5395, 5436), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (5405, 5436), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((5662, 5691), 'h5py.File', 'h5py.File', (['self.filename', '"""r"""'], {}), "(self.filename, 'r')\n", (5671, 5691), False, 'import h5py\n'), ((6155, 6184), 'h5py.File', 'h5py.File', (['self.filename', '"""w"""'], {}), "(self.filename, 'w')\n", (6164, 6184), False, 'import h5py\n'), ((6489, 6514), 'desc.equilibrium_io.hdf5Reader', 'hdf5Reader', (['self.filename'], {}), '(self.filename)\n', (6499, 6514), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((6788, 6828), 'h5py.File', 'h5py.File', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (6797, 6828), False, 'import h5py\n'), ((6846, 6859), 'desc.equilibrium_io.hdf5Reader', 'hdf5Reader', (['f'], {}), '(f)\n', (6856, 6859), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((7473, 7498), 
'desc.equilibrium_io.hdf5Reader', 'hdf5Reader', (['self.filename'], {}), '(self.filename)\n', (7483, 7498), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((8229, 8254), 'desc.equilibrium_io.hdf5Reader', 'hdf5Reader', (['self.filename'], {}), '(self.filename)\n', (8239, 8254), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((822, 853), 'desc.input_reader.InputReader', 'InputReader', ([], {'cl_args': 'self.argv0'}), '(cl_args=self.argv0)\n', (833, 853), False, 'from desc.input_reader import InputReader\n'), ((966, 997), 'desc.input_reader.InputReader', 'InputReader', ([], {'cl_args': 'self.argv1'}), '(cl_args=self.argv1)\n', (977, 997), False, 'from desc.input_reader import InputReader\n'), ((4359, 4400), 'desc.equilibrium_io.hdf5Writer', 'hdf5Writer', (['self.filename', 'self.file_mode'], {}), '(self.filename, self.file_mode)\n', (4369, 4400), False, 'from desc.equilibrium_io import hdf5Writer, hdf5Reader\n'), ((1445, 1479), 'pathlib.Path', 'pathlib.Path', (["('./' + self.argv2[0])"], {}), "('./' + self.argv2[0])\n", (1457, 1479), False, 'import pathlib\n')]
|
from unittest import skip
import unittest2
from nose.plugins.attrib import attr
from nose.tools import assert_equals
@attr('test_nose_plugin')
class TestNosePlugin(unittest2.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
"""first test, simulation passing test"""
assert_equals(1, 1)
def test_one6(self):
"""first test, simulation passing test"""
assert_equals(1, 1)
def test_three(self):
"""third test, simulation failing test"""
assert_equals(1, 1)
|
[
"nose.tools.assert_equals",
"nose.plugins.attrib.attr"
] |
[((121, 145), 'nose.plugins.attrib.attr', 'attr', (['"""test_nose_plugin"""'], {}), "('test_nose_plugin')\n", (125, 145), False, 'from nose.plugins.attrib import attr\n'), ((343, 362), 'nose.tools.assert_equals', 'assert_equals', (['(1)', '(1)'], {}), '(1, 1)\n', (356, 362), False, 'from nose.tools import assert_equals\n'), ((447, 466), 'nose.tools.assert_equals', 'assert_equals', (['(1)', '(1)'], {}), '(1, 1)\n', (460, 466), False, 'from nose.tools import assert_equals\n'), ((552, 571), 'nose.tools.assert_equals', 'assert_equals', (['(1)', '(1)'], {}), '(1, 1)\n', (565, 571), False, 'from nose.tools import assert_equals\n')]
|
# Copyright 2020-2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Charmcraft manifest.yaml related functionality."""
import datetime
import logging
import pathlib
from typing import Optional, List
import yaml
from charmcraft import __version__, config, linters
logger = logging.getLogger(__name__)
def create_manifest(
basedir: pathlib.Path,
started_at: datetime.datetime,
bases_config: Optional[config.BasesConfiguration],
linting_results: List[linters.CheckResult],
):
"""Create manifest.yaml in basedir for given base configuration.
For packing bundles, `bases` will be skipped when bases_config is None.
Charms should always include a valid bases_config.
:param basedir: Directory to create Charm in.
:param started_at: Build start time.
:param bases_config: Relevant bases configuration, if any.
:returns: Path to created manifest.yaml.
"""
content = {
"charmcraft-version": __version__,
"charmcraft-started-at": started_at.isoformat() + "Z",
}
# Annotate bases only if bases_config is not None.
if bases_config is not None:
bases = [
{
"name": r.name,
"channel": r.channel,
"architectures": r.architectures,
}
for r in bases_config.run_on
]
content["bases"] = bases
# include the linters results (only for attributes)
attributes_info = [
{"name": result.name, "result": result.result}
for result in linting_results
if result.check_type == linters.CheckType.attribute
]
content["analysis"] = {"attributes": attributes_info}
filepath = basedir / "manifest.yaml"
filepath.write_text(yaml.dump(content))
return filepath
|
[
"logging.getLogger",
"yaml.dump"
] |
[((859, 886), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (876, 886), False, 'import logging\n'), ((2319, 2337), 'yaml.dump', 'yaml.dump', (['content'], {}), '(content)\n', (2328, 2337), False, 'import yaml\n')]
|
# -*- coding: utf-8 -*-
import DatasetLoader
import RICNNModel
import tensorflow as tf
import sys
import numpy as np
import regularization as re
import os
import trainLoader
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
TRAIN_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw_100.h5'
TEST_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw.h5'
TRAIN_LABELS = '/media/liuqi/Files/dataset/rotate_100_simple.h5'
TEST_LABELS = '/home/liuqi/Desktop/mnist_rotation_new/mnist_all_rotation_normalized_float_test.amat'
LOADED_SIZE = 28
DESIRED_SIZE = 227
# model constants
NUMBER_OF_CLASSES = 10
NUMBER_OF_FILTERS = 40
NUMBER_OF_FC_FEATURES = 5120
NUMBER_OF_TRANSFORMATIONS = 8
# optimization constants
BATCH_SIZE = 64
TEST_CHUNK_SIZE = 100
ADAM_LEARNING_RATE = 1e-5
PRINTING_INTERVAL = 10
# set seeds
np.random.seed(100)
tf.set_random_seed(100)
x = tf.placeholder(tf.float32, shape=[None,
DESIRED_SIZE,
DESIRED_SIZE,
1,
NUMBER_OF_TRANSFORMATIONS])
y_gt = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASSES])
keep_prob = tf.placeholder(tf.float32)
logits, raw_feature, regularization_loss = RICNNModel.define_model(x,
keep_prob,
NUMBER_OF_CLASSES,
NUMBER_OF_FILTERS,
NUMBER_OF_FC_FEATURES)
with tf.name_scope('loss'):
with tf.name_scope('re_loss'):
re_loss = re.regu_constraint(raw_feature, logits)
with tf.name_scope('sotfmax_loss'):
sotfmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_gt))
with tf.name_scope('total_loss'):
total_loss = sotfmax_loss
train_step = tf.train.AdamOptimizer(ADAM_LEARNING_RATE).minimize(total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.initialize_all_variables())
train_data_loader = trainLoader.DataLoader(TRAIN_FILENAME,
TRAIN_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_data_loader = DatasetLoader.DataLoader(TEST_FILENAME,
TEST_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_size = test_data_loader.all()[1].shape[0]
assert test_size % TEST_CHUNK_SIZE == 0
number_of_test_chunks = test_size / TEST_CHUNK_SIZE
while (True):
batch = train_data_loader.next_batch(BATCH_SIZE) # next_batch from the loader
txt_name = "accary_ricnn.txt"
txt_file = file(txt_name, "a+")
if (train_data_loader.is_new_epoch()):
train_accuracy = session.run(accuracy, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
print_loss = session.run(re_loss,feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
print_loss_1 = session.run(sotfmax_loss, feed_dict={x: batch[0],
y_gt: batch[1],
keep_prob: 1.0})
print(print_loss)
print(print_loss_1)
train_context = "epochs:" + str(train_data_loader.get_completed_epochs()) + '\n'
txt_file.write(train_context)
loss_context = "softmax_loss:" + str(print_loss_1) + '\n'
txt_file.write(loss_context)
txt_file.close()
print("completed_epochs %d, training accuracy %g" %
(train_data_loader.get_completed_epochs(), train_accuracy))
sys.stdout.flush()
if (train_data_loader.get_completed_epochs() % PRINTING_INTERVAL == 0):
sum = 0.0
xt_name = "accary_ricnn.txt"
txt_file = file(txt_name, "a+")
for chunk_index in xrange(number_of_test_chunks):
chunk = test_data_loader.next_batch(TEST_CHUNK_SIZE)
sum += session.run(accuracy, feed_dict={x : chunk[0],
y_gt : chunk[1],
keep_prob : 1.0})
test_accuracy = sum / number_of_test_chunks
new_context = "testing accuracy: " + str(test_accuracy) + '\n'
txt_file.write(new_context)
txt_file.close()
print("testing accuracy %g" % test_accuracy)
sys.stdout.flush()
session.run(train_step, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 0.5})
|
[
"tensorflow.cast",
"sys.stdout.flush",
"tensorflow.initialize_all_variables",
"trainLoader.DataLoader",
"tensorflow.placeholder",
"tensorflow.Session",
"RICNNModel.define_model",
"tensorflow.argmax",
"tensorflow.name_scope",
"numpy.random.seed",
"regularization.regu_constraint",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.AdamOptimizer",
"tensorflow.set_random_seed",
"DatasetLoader.DataLoader"
] |
[((801, 820), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (815, 820), True, 'import numpy as np\n'), ((821, 844), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(100)'], {}), '(100)\n', (839, 844), True, 'import tensorflow as tf\n'), ((849, 951), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, DESIRED_SIZE, DESIRED_SIZE, 1, NUMBER_OF_TRANSFORMATIONS]'}), '(tf.float32, shape=[None, DESIRED_SIZE, DESIRED_SIZE, 1,\n NUMBER_OF_TRANSFORMATIONS])\n', (863, 951), True, 'import tensorflow as tf\n'), ((1107, 1166), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, NUMBER_OF_CLASSES]'}), '(tf.float32, shape=[None, NUMBER_OF_CLASSES])\n', (1121, 1166), True, 'import tensorflow as tf\n'), ((1179, 1205), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1193, 1205), True, 'import tensorflow as tf\n'), ((1249, 1351), 'RICNNModel.define_model', 'RICNNModel.define_model', (['x', 'keep_prob', 'NUMBER_OF_CLASSES', 'NUMBER_OF_FILTERS', 'NUMBER_OF_FC_FEATURES'], {}), '(x, keep_prob, NUMBER_OF_CLASSES, NUMBER_OF_FILTERS,\n NUMBER_OF_FC_FEATURES)\n', (1272, 1351), False, 'import RICNNModel\n'), ((2047, 2059), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2057, 2059), True, 'import tensorflow as tf\n'), ((2123, 2252), 'trainLoader.DataLoader', 'trainLoader.DataLoader', (['TRAIN_FILENAME', 'TRAIN_LABELS', 'NUMBER_OF_CLASSES', 'NUMBER_OF_TRANSFORMATIONS', 'LOADED_SIZE', 'DESIRED_SIZE'], {}), '(TRAIN_FILENAME, TRAIN_LABELS, NUMBER_OF_CLASSES,\n NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE, DESIRED_SIZE)\n', (2145, 2252), False, 'import trainLoader\n'), ((2453, 2582), 'DatasetLoader.DataLoader', 'DatasetLoader.DataLoader', (['TEST_FILENAME', 'TEST_LABELS', 'NUMBER_OF_CLASSES', 'NUMBER_OF_TRANSFORMATIONS', 'LOADED_SIZE', 'DESIRED_SIZE'], {}), '(TEST_FILENAME, TEST_LABELS, NUMBER_OF_CLASSES,\n NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE, DESIRED_SIZE)\n', (2477, 2582), False, 'import DatasetLoader\n'), ((1485, 1506), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (1498, 1506), True, 'import tensorflow as tf\n'), ((1928, 1948), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (1937, 1948), True, 'import tensorflow as tf\n'), ((1950, 1968), 'tensorflow.argmax', 'tf.argmax', (['y_gt', '(1)'], {}), '(y_gt, 1)\n', (1959, 1968), True, 'import tensorflow as tf\n'), ((1996, 2035), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2003, 2035), True, 'import tensorflow as tf\n'), ((2072, 2101), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2099, 2101), True, 'import tensorflow as tf\n'), ((1517, 1541), 'tensorflow.name_scope', 'tf.name_scope', (['"""re_loss"""'], {}), "('re_loss')\n", (1530, 1541), True, 'import tensorflow as tf\n'), ((1561, 1600), 'regularization.regu_constraint', 're.regu_constraint', (['raw_feature', 'logits'], {}), '(raw_feature, logits)\n', (1579, 1600), True, 'import regularization as re\n'), ((1610, 1639), 'tensorflow.name_scope', 'tf.name_scope', (['"""sotfmax_loss"""'], {}), "('sotfmax_loss')\n", (1623, 1639), True, 'import tensorflow as tf\n'), ((1757, 1784), 'tensorflow.name_scope', 'tf.name_scope', (['"""total_loss"""'], {}), "('total_loss')\n", (1770, 1784), True, 'import tensorflow as tf\n'), ((1834, 1876), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['ADAM_LEARNING_RATE'], {}), 
'(ADAM_LEARNING_RATE)\n', (1856, 1876), True, 'import tensorflow as tf\n'), ((4127, 4145), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4143, 4145), False, 'import sys\n'), ((1679, 1746), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'y_gt'}), '(logits=logits, labels=y_gt)\n', (1718, 1746), True, 'import tensorflow as tf\n'), ((4855, 4873), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4871, 4873), False, 'import sys\n')]
|
#!/usr/bin/env python
"""Extract radial, sulcal, and gyral orientations from gyral coordinate NIFTI file"""
def main():
import argparse
parser = argparse.ArgumentParser("Extract radial, sulcal, and gyral dyads from a coord NIFTI file")
parser.add_argument('coord', help='name of the coord file')
parser.add_argument('-b', '--base', help='Basename of the output files')
parser.add_argument('-r', '--radial', help='Filename for the radial output (overrides the --base option)')
parser.add_argument('-s', '--sulcal', help='Filename for the sulcal output (overrides the --base option)')
parser.add_argument('-g', '--gyral', help='Filename for the gyral output (overrides the --base option)')
args = parser.parse_args()
from mcot.core.surface import utils
utils.gcoord_split(args)
|
[
"mcot.core.surface.utils.gcoord_split",
"argparse.ArgumentParser"
] |
[((156, 251), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Extract radial, sulcal, and gyral dyads from a coord NIFTI file"""'], {}), "(\n 'Extract radial, sulcal, and gyral dyads from a coord NIFTI file')\n", (179, 251), False, 'import argparse\n'), ((795, 819), 'mcot.core.surface.utils.gcoord_split', 'utils.gcoord_split', (['args'], {}), '(args)\n', (813, 819), False, 'from mcot.core.surface import utils\n')]
|
import pyautogui
import time
import datetime
class SwipeCard:
def __init__(self):
self.resolution = pyautogui.size()
def resolve_task(self):
try:
hide_card_position = pyautogui.center(
pyautogui.locateOnScreen(f"assets/tasks/swipe_card/main.png",
confidence=0.7))
pyautogui.click(hide_card_position[0], hide_card_position[1])
time.sleep(1)
card_position = pyautogui.center(
pyautogui.locateOnScreen(f"assets/tasks/swipe_card/card.png",
confidence=0.8))
pyautogui.moveTo(card_position[0], card_position[1])
pyautogui.mouseDown(button="left")
mouse_pos_x = card_position[0]
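            # with the button held down, drag the card right in 60 px steps to simulate the swipe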
while (mouse_pos_x < 1450):
pyautogui.moveTo(mouse_pos_x, card_position[1])
mouse_pos_x += 60
pyautogui.click()
return True
except Exception as e:
print(e)
def log(self):
time = datetime.datetime.now()
print(
f"[{time.hour}:{time.minute}][ZADANIE] Rozwiązauje kartę w adminie"
)
def run(self):
return self.resolve_task()
|
[
"pyautogui.moveTo",
"pyautogui.locateOnScreen",
"time.sleep",
"pyautogui.size",
"pyautogui.click",
"datetime.datetime.now",
"pyautogui.mouseDown"
] |
[((114, 130), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (128, 130), False, 'import pyautogui\n'), ((1081, 1104), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1102, 1104), False, 'import datetime\n'), ((372, 433), 'pyautogui.click', 'pyautogui.click', (['hide_card_position[0]', 'hide_card_position[1]'], {}), '(hide_card_position[0], hide_card_position[1])\n', (387, 433), False, 'import pyautogui\n'), ((447, 460), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (457, 460), False, 'import time\n'), ((656, 708), 'pyautogui.moveTo', 'pyautogui.moveTo', (['card_position[0]', 'card_position[1]'], {}), '(card_position[0], card_position[1])\n', (672, 708), False, 'import pyautogui\n'), ((721, 755), 'pyautogui.mouseDown', 'pyautogui.mouseDown', ([], {'button': '"""left"""'}), "(button='left')\n", (740, 755), False, 'import pyautogui\n'), ((951, 968), 'pyautogui.click', 'pyautogui.click', ([], {}), '()\n', (966, 968), False, 'import pyautogui\n'), ((240, 317), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['f"""assets/tasks/swipe_card/main.png"""'], {'confidence': '(0.7)'}), "(f'assets/tasks/swipe_card/main.png', confidence=0.7)\n", (264, 317), False, 'import pyautogui\n'), ((523, 600), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['f"""assets/tasks/swipe_card/card.png"""'], {'confidence': '(0.8)'}), "(f'assets/tasks/swipe_card/card.png', confidence=0.8)\n", (547, 600), False, 'import pyautogui\n'), ((856, 903), 'pyautogui.moveTo', 'pyautogui.moveTo', (['mouse_pos_x', 'card_position[1]'], {}), '(mouse_pos_x, card_position[1])\n', (872, 903), False, 'import pyautogui\n')]
|
from service import Service
from unittest import TestCase
from mock import patch
import sys
class TestService(TestCase):
@patch('service.Service.bad_random', return_value=10)
def test_bad_random(self, bad_random):
self.assertEqual(bad_random(), 10)
@patch('service.Service.bad_random', return_value=10)
def test_divide(self, bad_random):
x = Service()
self.assertEqual(x.divide(2),5)
self.assertEqual(x.divide(-2),-5)
bad_random.return_value=-10
self.assertEqual(x.divide(2),-5)
bad_random.return_value=0
self.assertEqual(x.divide(sys.maxsize),0)
self.assertEqual(x.divide(-sys.maxsize+1),0)
def test_abs_plus(self):
x=Service()
self.assertEqual(x.abs_plus(10),11)
self.assertEqual(x.abs_plus(0),1)
self.assertEqual(x.abs_plus(-10),11)
self.assertEqual(x.abs_plus(-sys.maxsize+1),sys.maxsize)
self.assertEqual(x.abs_plus(10),11)
@patch('service.Service.bad_random', return_value=10)
def test_complicated_function(self, bad_random):
x = Service()
results = x.complicated_function(20)
self.assertEqual(results[0], 0.5)
self.assertEqual(results[1], 0)
bad_random.return_value=-13
results = x.complicated_function(-1)
self.assertEqual(results[0], 13)
self.assertEqual(results[1], 1)
bad_random.return_value=0
results = x.complicated_function(sys.maxsize)
self.assertEqual(results[0], 0)
self.assertEqual(results[1], 0)
|
[
"mock.patch",
"service.Service"
] |
[((124, 176), 'mock.patch', 'patch', (['"""service.Service.bad_random"""'], {'return_value': '(10)'}), "('service.Service.bad_random', return_value=10)\n", (129, 176), False, 'from mock import patch\n'), ((257, 309), 'mock.patch', 'patch', (['"""service.Service.bad_random"""'], {'return_value': '(10)'}), "('service.Service.bad_random', return_value=10)\n", (262, 309), False, 'from mock import patch\n'), ((878, 930), 'mock.patch', 'patch', (['"""service.Service.bad_random"""'], {'return_value': '(10)'}), "('service.Service.bad_random', return_value=10)\n", (883, 930), False, 'from mock import patch\n'), ((352, 361), 'service.Service', 'Service', ([], {}), '()\n', (359, 361), False, 'from service import Service\n'), ((655, 664), 'service.Service', 'Service', ([], {}), '()\n', (662, 664), False, 'from service import Service\n'), ((988, 997), 'service.Service', 'Service', ([], {}), '()\n', (995, 997), False, 'from service import Service\n')]
|
"""Use TIMESTAMP column for latest submission
Revision ID: eff<PASSWORD>0<PASSWORD>
Revises: <PASSWORD>
Create Date: 2017-01-08 22:20:43.814375
"""
# revision identifiers, used by Alembic.
revision = 'eff<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
import libweasyl
from libweasyl.legacy import UNIXTIME_OFFSET
def upgrade():
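    # rename the old unixtime column, add a TIMESTAMP replacement, convert the stored values, then drop the old column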
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_old',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.ArrowColumn(), nullable=False, server_default='epoch'),
)
op.execute(
"UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE 'epoch' + "
"(latest_submission_time_old - %d) * INTERVAL '1 second'" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_old')
def downgrade():
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_new',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.WeasylTimestampColumn(), nullable=False, server_default='0'),
)
op.execute(
"UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_new')
|
[
"alembic.op.alter_column",
"alembic.op.drop_column",
"libweasyl.models.helpers.ArrowColumn",
"alembic.op.execute",
"libweasyl.models.helpers.WeasylTimestampColumn"
] |
[((433, 536), 'alembic.op.alter_column', 'op.alter_column', (['"""profile"""', '"""latest_submission_time"""'], {'new_column_name': '"""latest_submission_time_old"""'}), "('profile', 'latest_submission_time', new_column_name=\n 'latest_submission_time_old')\n", (448, 536), False, 'from alembic import op\n'), ((736, 918), 'alembic.op.execute', 'op.execute', (['("UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE \'epoch\' + (latest_submission_time_old - %d) * INTERVAL \'1 second\'"\n % (UNIXTIME_OFFSET,))'], {}), '(\n "UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE \'epoch\' + (latest_submission_time_old - %d) * INTERVAL \'1 second\'"\n % (UNIXTIME_OFFSET,))\n', (746, 918), False, 'from alembic import op\n'), ((933, 988), 'alembic.op.drop_column', 'op.drop_column', (['"""profile"""', '"""latest_submission_time_old"""'], {}), "('profile', 'latest_submission_time_old')\n", (947, 988), False, 'from alembic import op\n'), ((1012, 1115), 'alembic.op.alter_column', 'op.alter_column', (['"""profile"""', '"""latest_submission_time"""'], {'new_column_name': '"""latest_submission_time_new"""'}), "('profile', 'latest_submission_time', new_column_name=\n 'latest_submission_time_new')\n", (1027, 1115), False, 'from alembic import op\n'), ((1321, 1461), 'alembic.op.execute', 'op.execute', (["('UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d'\n % (UNIXTIME_OFFSET,))"], {}), "(\n 'UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d'\n % (UNIXTIME_OFFSET,))\n", (1331, 1461), False, 'from alembic import op\n'), ((1465, 1520), 'alembic.op.drop_column', 'op.drop_column', (['"""profile"""', '"""latest_submission_time_new"""'], {}), "('profile', 'latest_submission_time_new')\n", (1479, 1520), False, 'from alembic import op\n'), ((645, 683), 'libweasyl.models.helpers.ArrowColumn', 'libweasyl.models.helpers.ArrowColumn', ([], {}), '()\n', (681, 683), False, 'import libweasyl\n'), ((1224, 1272), 'libweasyl.models.helpers.WeasylTimestampColumn', 'libweasyl.models.helpers.WeasylTimestampColumn', ([], {}), '()\n', (1270, 1272), False, 'import libweasyl\n')]
|
from bs4 import BeautifulSoup
import requests
import math
import time
start_url='https://www.macys.com'
domain='https://www.macys.com'
''' get soup '''
def get_soup(url):
# get contents from url
content=''
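    # retry until the page is fetched successfully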
while content=='':
try:
content = requests.get(url,
headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}).content
except:
time.sleep(5)
continue
return BeautifulSoup(content,'lxml') # choose lxml parser
'''find all anchor tags'''
def findAllATags(url):
soup = get_soup(url)
a_tags = soup.findAll('a')
a_tags=[a for a in [a for a in a_tags if 'href' in a.attrs] if a.attrs['href'].find('/shop')==0]
return a_tags
'''print all 'title' attributes'''
def printTitles(url,f):
soup=get_soup(domain+url)
temp=[i.find('a') for i in soup.findAll('div',{'class':'productThumbnailImage'})]
for i in temp:
f.write(i['title']+'\n')
'''iterate through all pages for each soup object'''
def pagination(count, url,f,u):
count_=math.ceil(count/60)
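    # the scraper assumes 60 products per page; remaining pages are reached via the Pageindex URL segment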
i=2
printTitles(url,f)
u.write(url+'\n')
while i<=count_:
printTitles(url.replace("?","/Pageindex/"+str(i)+"?"),f)
i+=1
'''filehandlers for output.txt and urlHandler.txt'''
def fileHandler():
f=open('output.txt','a')
return f
def urlHandler():
f=open('urlHandler.txt','a')
return f
'''generates soup object for each url'''
def getItems(url):
soup=get_soup(domain+url)
try:
f=fileHandler()
u=urlHandler()
f.write(soup.find('span', {'id' : 'currentCategory'}).text+'\n')
pagination(int(soup.find('span',{'id':'productCount'}).text),url,f, u)
except:
pass
finally:
f.close()
u.close()
'''main function'''
if __name__=='__main__':
start_time=time.time()
items=[]
tags=findAllATags(url=start_url)
'''executing getItems for tags[12:] because first 11 have no relevant information'''
for i in tags[12:]:
getItems(i.attrs['href'])
print(time.time()-start_time)
|
[
"math.ceil",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"time.time"
] |
[((575, 605), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (588, 605), False, 'from bs4 import BeautifulSoup\n'), ((1199, 1220), 'math.ceil', 'math.ceil', (['(count / 60)'], {}), '(count / 60)\n', (1208, 1220), False, 'import math\n'), ((2012, 2023), 'time.time', 'time.time', ([], {}), '()\n', (2021, 2023), False, 'import time\n'), ((2237, 2248), 'time.time', 'time.time', ([], {}), '()\n', (2246, 2248), False, 'import time\n'), ((290, 465), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'\n }"}), "(url, headers={'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'\n })\n", (302, 465), False, 'import requests\n'), ((527, 540), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (537, 540), False, 'import time\n')]
|
from pydantic import BaseModel, Field, EmailStr
class PostSchema(BaseModel):
id: int = Field(default=None)
title: str = Field(...)
content: str = Field(...)
class Config:
schema_extra = {
"example": {
"title": "Securing FastAPI applications with JWT.",
"content": "In this tutorial, you'll learn how to secure your application by enabling authentication using JWT. We'll be using PyJWT to sign, encode and decode JWT tokens...."
}
}
class UserSchema(BaseModel):
fullname: str = Field(...)
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"fullname": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
}
class UserLoginSchema(BaseModel):
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
}
|
[
"pydantic.Field"
] |
[((93, 112), 'pydantic.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (98, 112), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((130, 140), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (135, 140), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((160, 170), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (165, 170), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((575, 585), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (580, 585), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((608, 618), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (613, 618), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((639, 649), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (644, 649), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((915, 925), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (920, 925), False, 'from pydantic import BaseModel, Field, EmailStr\n'), ((946, 956), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (951, 956), False, 'from pydantic import BaseModel, Field, EmailStr\n')]
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test_plan
import settings
class Module(test_plan.Testplan):
runScript = settings.KMD_RUNSCRIPT
deviceTargets = ['sim', 'ufpga']
def __init__(self):
super(Module, self).__init__(__name__)
# Convenience globals
kmd = Module.runScript
devices = Module.deviceTargets
ces = ["Core Engine Scheduler"]
nn = ["Neural Network"]
convd = ["CONV HW - Direct"]
convi = ["CONV HW - Image"]
convw = ["CONV HW - Winograd"]
convp = ["CONV HW - Pipeline"]
sdpx1 = ["SDP X1 HW"]
sdpx2 = ["SDP X2 HW"]
sdpy = ["SDP Y HW"]
sdpf = ["SDP HW - Full"]
cdp = ["CDP HW"]
pdp = ["PDP HW"]
def registerNvSmallTests(self, testplan):
testplan.append(
[0, "Written", kmd, "CONV_D_L0_0_small", None, convd, devices, "Convolution test - Sanity test direct convolution",
"Direct convolution, 8x8x128 input cube, 3x3x128 kernel cube and 32 kernels input and weight read from DRAM, no mean and bias data, output written to DRAM through SDP."])
testplan.append(
[0, "Written", kmd, "SDP_X1_L0_0_small", None, sdpx1, devices,
"SDP test - Sanity test for SDP, only X1 enabled with ALU, X2 and Y disable. No DMA used",
"Element wise sum operation in X1, 8x8x32 input cube and 8x8x32 bias cube. Activation function as ReLU"])
testplan.append(
[0, "Written", kmd, "CDP_L0_0_small", None, cdp, devices, "CDP test - Sanity test for CDP",
"Use only linear table with LUT configured with all 1. 8x8x32 input cube and 8x8x32 output cube."])
testplan.append(
[0, "Written", kmd, "PDP_L0_0_small", None, pdp, devices, "PDP test - Sanity test for PDP with max pooling",
"Max pooling, 8x8x32 input cube, 8x8x32 output cube, no padding, 1x1 kernel size. No need to compare data. It is enough if task succeeds to pass this test."])
testplan.append(
[0, "Written", kmd, "NN_L0_1_small", None, nn, devices, "AlexNet", "AlexNet"])
def registerFirmwareSmallTests(self):
testplan = []
registerNvSmallTests(self, testplan)
for item in testplan:
test = test_plan.Test()
test.level = item[0]
test.status = item[1]
test.runscript = item[2]
test.name = item[3]
test.options = item[4]
test.features = item[5]
test.targets = item[6]
test.description = item[7]
test.dependencies = None
self.add_test(test)
def registerTests(self):
registerFirmwareSmallTests(self)
Module.register_tests = registerTests
|
[
"test_plan.Test"
] |
[((3586, 3602), 'test_plan.Test', 'test_plan.Test', ([], {}), '()\n', (3600, 3602), False, 'import test_plan\n')]
|
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2018-09-09 23:06:06 +0100 (Sun, 09 Sep 2018)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish # pylint: disable=line-too-long
#
# https://www.linkedin.com/in/harisekhon
#
"""
Strip ANSI Escape Codes from Text String input
Works as a standard unix filter program, reading from file arguments or standard input and printing to standard output
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes
from harisekhon import CLI
except ImportError as _:
print('module import failed: %s' % _, file=sys.stderr)
print("Did you remember to build the project by running 'make'?", file=sys.stderr)
print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.2'
# pylint: disable=too-few-public-methods
class StripAnsiEscapeCodes(CLI):
# def __init__(self):
# # Python 2.x
# super(StripAnsiEscapeCodes, self).__init__()
# # Python 3.x
# # super().__init__()
def run(self):
if not self.args:
self.args.append('-')
for arg in self.args:
if arg == '-':
continue
if not os.path.exists(arg):
print("'%s' not found" % arg)
sys.exit(ERRORS['WARNING'])
if os.path.isfile(arg):
log_option('file', arg)
elif os.path.isdir(arg):
log_option('directory', arg)
else:
die("path '%s' could not be determined as either a file or directory" % arg)
for filename in self.args:
if filename == '-':
for line in sys.stdin:
print(strip_ansi_escape_codes(line), end='')
else:
with open(filename) as filehandle:
for line in filehandle:
print(strip_ansi_escape_codes(line), end='')
if __name__ == '__main__':
StripAnsiEscapeCodes().main()
|
[
"os.path.exists",
"os.path.isfile",
"os.path.dirname",
"harisekhon.utils.log_option",
"os.path.isdir",
"harisekhon.utils.die",
"harisekhon.utils.strip_ansi_escape_codes",
"sys.exit",
"sys.path.append"
] |
[((900, 923), 'sys.path.append', 'sys.path.append', (['libdir'], {}), '(libdir)\n', (915, 923), False, 'import sys\n'), ((863, 888), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (878, 888), False, 'import os\n'), ((1381, 1392), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (1389, 1392), False, 'import sys\n'), ((1974, 1993), 'os.path.isfile', 'os.path.isfile', (['arg'], {}), '(arg)\n', (1988, 1993), False, 'import os\n'), ((1848, 1867), 'os.path.exists', 'os.path.exists', (['arg'], {}), '(arg)\n', (1862, 1867), False, 'import os\n'), ((1931, 1958), 'sys.exit', 'sys.exit', (["ERRORS['WARNING']"], {}), "(ERRORS['WARNING'])\n", (1939, 1958), False, 'import sys\n'), ((2011, 2034), 'harisekhon.utils.log_option', 'log_option', (['"""file"""', 'arg'], {}), "('file', arg)\n", (2021, 2034), False, 'from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes\n'), ((2052, 2070), 'os.path.isdir', 'os.path.isdir', (['arg'], {}), '(arg)\n', (2065, 2070), False, 'import os\n'), ((2088, 2116), 'harisekhon.utils.log_option', 'log_option', (['"""directory"""', 'arg'], {}), "('directory', arg)\n", (2098, 2116), False, 'from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes\n'), ((2151, 2227), 'harisekhon.utils.die', 'die', (['("path \'%s\' could not be determined as either a file or directory" % arg)'], {}), '("path \'%s\' could not be determined as either a file or directory" % arg)\n', (2154, 2227), False, 'from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes\n'), ((2360, 2389), 'harisekhon.utils.strip_ansi_escape_codes', 'strip_ansi_escape_codes', (['line'], {}), '(line)\n', (2383, 2389), False, 'from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes\n'), ((2542, 2571), 'harisekhon.utils.strip_ansi_escape_codes', 'strip_ansi_escape_codes', (['line'], {}), '(line)\n', (2565, 2571), False, 'from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes\n')]
|
from vk_bot.core.modules.basicplug import BasicPlug
import time
class Counting(BasicPlug):
command = ("отсчет",)
doc = "Отсчет от 1 до 3"
def main(self):
for x in range(3, -1, -1):
if x == 0:
return
self.sendmsg(x)
time.sleep(1)
|
[
"time.sleep"
] |
[((287, 300), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (297, 300), False, 'import time\n')]
|
## Copyright © 2021, Oracle and/or its affiliates.
## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#!/usr/bin/env python
from setuptools import setup
setup(name='wind-marketplace-library',
version="1.0.0",
description='Robot Framework test library for OCI Marketplace',
long_description='Robot Framework test library for OCI Marketplace',
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Framework :: WIND Robot Framework',
],
author='<EMAIL>',
author_email='<EMAIL>',
packages=['MarketplaceLibrary'],
license = "UPL-1.0",
install_requires=[
],
extras_require={
'dev': [
]
},
platforms='any',
include_package_data=True,
zip_safe=False)
|
[
"setuptools.setup"
] |
[((212, 803), 'setuptools.setup', 'setup', ([], {'name': '"""wind-marketplace-library"""', 'version': '"""1.0.0"""', 'description': '"""Robot Framework test library for OCI Marketplace"""', 'long_description': '"""Robot Framework test library for OCI Marketplace"""', 'classifiers': "['Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Framework :: WIND Robot Framework']", 'author': '"""<EMAIL>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['MarketplaceLibrary']", 'license': '"""UPL-1.0"""', 'install_requires': '[]', 'extras_require': "{'dev': []}", 'platforms': '"""any"""', 'include_package_data': '(True)', 'zip_safe': '(False)'}), "(name='wind-marketplace-library', version='1.0.0', description=\n 'Robot Framework test library for OCI Marketplace', long_description=\n 'Robot Framework test library for OCI Marketplace', classifiers=[\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Framework :: WIND Robot Framework'], author='<EMAIL>', author_email=\n '<EMAIL>', packages=['MarketplaceLibrary'], license='UPL-1.0',\n install_requires=[], extras_require={'dev': []}, platforms='any',\n include_package_data=True, zip_safe=False)\n", (217, 803), False, 'from setuptools import setup\n')]
|
from django.http import JsonResponse
from django.shortcuts import reverse
from django.urls import NoReverseMatch
from django.views import View
from rest_framework import __version__ as drf_version
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from oilandrope import __version__
class ApiVersionView(View):
http_method_names = ['get']
data = {
'version': __version__,
'powered_by': 'Django Rest Framework',
'drf_version': drf_version,
}
def get(self, request, *args, **kwargs):
return JsonResponse(self.data)
class URLResolverViewSet(ViewSet):
"""
Returns URL with given resolver and params.
"""
permission_classes = [AllowAny]
def resolve_url(self, request, *args, **kwargs):
data = request.data.copy()
if 'resolver' not in data:
raise ValidationError()
resolver = data.pop('resolver')
if isinstance(resolver, list):
resolver = resolver[0]
extra_params = {}
for key, value in data.items():
extra_params[key] = value
try:
url = reverse(resolver, kwargs=extra_params)
except NoReverseMatch:
url = '#no-url'
return Response({'url': url})
|
[
"rest_framework.response.Response",
"rest_framework.exceptions.ValidationError",
"django.shortcuts.reverse",
"django.http.JsonResponse"
] |
[((681, 704), 'django.http.JsonResponse', 'JsonResponse', (['self.data'], {}), '(self.data)\n', (693, 704), False, 'from django.http import JsonResponse\n'), ((1369, 1391), 'rest_framework.response.Response', 'Response', (["{'url': url}"], {}), "({'url': url})\n", (1377, 1391), False, 'from rest_framework.response import Response\n'), ((985, 1002), 'rest_framework.exceptions.ValidationError', 'ValidationError', ([], {}), '()\n', (1000, 1002), False, 'from rest_framework.exceptions import ValidationError\n'), ((1255, 1293), 'django.shortcuts.reverse', 'reverse', (['resolver'], {'kwargs': 'extra_params'}), '(resolver, kwargs=extra_params)\n', (1262, 1293), False, 'from django.shortcuts import reverse\n')]
|
from zerver.lib.actions import do_add_realm_playground
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import RealmPlayground, get_realm
class RealmPlaygroundTests(ZulipTestCase):
def test_create_one_playground_entry(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Python playground",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
}
# Now send a POST request to the API endpoint.
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
# Check if the actual object exists
realm = get_realm("zulip")
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground").exists()
)
def test_create_multiple_playgrounds_for_same_language(self) -> None:
iago = self.example_user("iago")
data = [
{
"name": "Python playground 1",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
},
{
"name": "Python playground 2",
"pygments_language": "Python",
"url_prefix": "https://python2.example.com",
},
]
for payload in data:
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
realm = get_realm("zulip")
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground 1").exists()
)
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground 2").exists()
)
def test_invalid_params(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Invalid URL",
"pygments_language": "Python",
"url_prefix": "https://invalid-url",
}
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(resp, "url_prefix is not a URL")
payload["url_prefix"] = "https://python.example.com"
payload["pygments_language"] = "a$b$c"
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(resp, "Invalid characters in pygments language")
def test_create_already_existing_playground(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Python playground",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
}
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(
resp, "Realm playground with this Realm, Pygments language and Name already exists."
)
def test_not_realm_admin(self) -> None:
hamlet = self.example_user("hamlet")
resp = self.api_post(hamlet, "/json/realm/playgrounds")
self.assert_json_error(resp, "Must be an organization administrator")
resp = self.api_delete(hamlet, "/json/realm/playgrounds/1")
self.assert_json_error(resp, "Must be an organization administrator")
def test_delete_realm_playground(self) -> None:
iago = self.example_user("iago")
realm = get_realm("zulip")
playground_info = dict(
name="Python playground",
pygments_language="Python",
url_prefix="https://python.example.com",
)
playground_id = do_add_realm_playground(realm, acting_user=iago, **playground_info)
self.assertTrue(RealmPlayground.objects.filter(name="Python playground").exists())
result = self.api_delete(iago, f"/json/realm/playgrounds/{playground_id + 1}")
self.assert_json_error(result, "Invalid playground")
result = self.api_delete(iago, f"/json/realm/playgrounds/{playground_id}")
self.assert_json_success(result)
self.assertFalse(RealmPlayground.objects.filter(name="Python").exists())
|
[
"zerver.models.RealmPlayground.objects.filter",
"zerver.lib.actions.do_add_realm_playground",
"zerver.models.get_realm"
] |
[((697, 715), 'zerver.models.get_realm', 'get_realm', (['"""zulip"""'], {}), "('zulip')\n", (706, 715), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((1517, 1535), 'zerver.models.get_realm', 'get_realm', (['"""zulip"""'], {}), "('zulip')\n", (1526, 1535), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((3517, 3535), 'zerver.models.get_realm', 'get_realm', (['"""zulip"""'], {}), "('zulip')\n", (3526, 3535), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((3734, 3801), 'zerver.lib.actions.do_add_realm_playground', 'do_add_realm_playground', (['realm'], {'acting_user': 'iago'}), '(realm, acting_user=iago, **playground_info)\n', (3757, 3801), False, 'from zerver.lib.actions import do_add_realm_playground\n'), ((753, 822), 'zerver.models.RealmPlayground.objects.filter', 'RealmPlayground.objects.filter', ([], {'realm': 'realm', 'name': '"""Python playground"""'}), "(realm=realm, name='Python playground')\n", (783, 822), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((1573, 1644), 'zerver.models.RealmPlayground.objects.filter', 'RealmPlayground.objects.filter', ([], {'realm': 'realm', 'name': '"""Python playground 1"""'}), "(realm=realm, name='Python playground 1')\n", (1603, 1644), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((1701, 1772), 'zerver.models.RealmPlayground.objects.filter', 'RealmPlayground.objects.filter', ([], {'realm': 'realm', 'name': '"""Python playground 2"""'}), "(realm=realm, name='Python playground 2')\n", (1731, 1772), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((3826, 3882), 'zerver.models.RealmPlayground.objects.filter', 'RealmPlayground.objects.filter', ([], {'name': '"""Python playground"""'}), "(name='Python playground')\n", (3856, 3882), False, 'from zerver.models import RealmPlayground, get_realm\n'), ((4192, 4237), 'zerver.models.RealmPlayground.objects.filter', 'RealmPlayground.objects.filter', ([], {'name': '"""Python"""'}), "(name='Python')\n", (4222, 4237), False, 'from zerver.models import RealmPlayground, get_realm\n')]
|
#
# This is Seisflows
#
# See LICENCE file
#
#
###############################################################################
# Import system modules
import os
# Import Numpy
import numpy as np
# Local imports
from seisflows.tools import unix
from seisflows.tools.math import dot
from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy
class NLCG:
""" Nonlinear conjugate gradient method
"""
def __init__(self, path='.', load=loadnpy, save=savenpy, thresh=1.,
maxiter=np.inf, precond=None):
self.path = path
self.load = load
self.save = save
self.maxiter = maxiter
self.thresh = thresh
self.precond = precond
try:
self.iter = loadtxt(self.path+'/'+'NLCG/iter')
except IOError:
unix.mkdir(self.path+'/'+'NLCG')
self.iter = 0
def __call__(self):
""" Returns NLCG search direction
"""
self.iter += 1
savetxt(self.path+'/'+'NLCG/iter', self.iter)
unix.cd(self.path)
g_new = self.load('g_new')
if self.iter == 1:
return -g_new, 0
elif self.iter > self.maxiter:
print('restarting NLCG... [periodic restart]')
self.restart()
return -g_new, 1
# compute search direction
g_old = self.load('g_old')
p_old = self.load('p_old')
if self.precond:
beta = pollak_ribere(g_new, g_old, self.precond)
p_new = -self.precond(g_new) + beta*p_old
else:
beta = pollak_ribere(g_new, g_old)
p_new = -g_new + beta*p_old
# check restart conditions
if check_conjugacy(g_new, g_old) > self.thresh:
print('restarting NLCG... [loss of conjugacy]')
self.restart()
return -g_new, 1
elif check_descent(p_new, g_new) > 0.:
print('restarting NLCG... [not a descent direction]')
self.restart()
return -g_new, 1
else:
return p_new, 0
def restart(self):
""" Restarts algorithm
"""
self.iter = 1
savetxt(self.path+'/'+'NLCG/iter', self.iter)
# Utility functions
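# Fletcher-Reeves update parameter: beta = <precond(g_new), g_new> / <g_old, g_old>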
def fletcher_reeves(g_new, g_old, precond=lambda x: x):
num = dot(precond(g_new), g_new)
den = dot(g_old, g_old)
beta = num/den
return beta
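# Polak-Ribiere update parameter: beta = <precond(g_new), g_new - g_old> / <g_old, g_old>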
def pollak_ribere(g_new, g_old, precond=lambda x: x):
num = dot(precond(g_new), g_new-g_old)
den = dot(g_old, g_old)
beta = num/den
return beta
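# relative orthogonality |<g_new, g_old>| / <g_new, g_new>; a large value signals loss of conjugacy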
def check_conjugacy(g_new, g_old):
return abs(dot(g_new, g_old) / dot(g_new, g_new))
def check_descent(p_new, g_new):
return dot(p_new, g_new) / dot(g_new, g_new)
|
[
"seisflows.tools.unix.mkdir",
"seisflows.tools.unix.cd",
"seisflows.tools.tools.loadtxt",
"seisflows.tools.tools.savetxt",
"seisflows.tools.math.dot"
] |
[((2343, 2360), 'seisflows.tools.math.dot', 'dot', (['g_old', 'g_old'], {}), '(g_old, g_old)\n', (2346, 2360), False, 'from seisflows.tools.math import dot\n'), ((2505, 2522), 'seisflows.tools.math.dot', 'dot', (['g_old', 'g_old'], {}), '(g_old, g_old)\n', (2508, 2522), False, 'from seisflows.tools.math import dot\n'), ((984, 1033), 'seisflows.tools.tools.savetxt', 'savetxt', (["(self.path + '/' + 'NLCG/iter')", 'self.iter'], {}), "(self.path + '/' + 'NLCG/iter', self.iter)\n", (991, 1033), False, 'from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy\n'), ((1039, 1057), 'seisflows.tools.unix.cd', 'unix.cd', (['self.path'], {}), '(self.path)\n', (1046, 1057), False, 'from seisflows.tools import unix\n'), ((2171, 2220), 'seisflows.tools.tools.savetxt', 'savetxt', (["(self.path + '/' + 'NLCG/iter')", 'self.iter'], {}), "(self.path + '/' + 'NLCG/iter', self.iter)\n", (2178, 2220), False, 'from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy\n'), ((2695, 2712), 'seisflows.tools.math.dot', 'dot', (['p_new', 'g_new'], {}), '(p_new, g_new)\n', (2698, 2712), False, 'from seisflows.tools.math import dot\n'), ((2715, 2732), 'seisflows.tools.math.dot', 'dot', (['g_new', 'g_new'], {}), '(g_new, g_new)\n', (2718, 2732), False, 'from seisflows.tools.math import dot\n'), ((744, 782), 'seisflows.tools.tools.loadtxt', 'loadtxt', (["(self.path + '/' + 'NLCG/iter')"], {}), "(self.path + '/' + 'NLCG/iter')\n", (751, 782), False, 'from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy\n'), ((2610, 2627), 'seisflows.tools.math.dot', 'dot', (['g_new', 'g_old'], {}), '(g_new, g_old)\n', (2613, 2627), False, 'from seisflows.tools.math import dot\n'), ((2630, 2647), 'seisflows.tools.math.dot', 'dot', (['g_new', 'g_new'], {}), '(g_new, g_new)\n', (2633, 2647), False, 'from seisflows.tools.math import dot\n'), ((815, 851), 'seisflows.tools.unix.mkdir', 'unix.mkdir', (["(self.path + '/' + 'NLCG')"], {}), "(self.path + '/' + 'NLCG')\n", (825, 851), False, 'from seisflows.tools import unix\n')]
|
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.5/physics.xsd"
@dataclass
class Physics:
"""
The physics tag specifies the type and properties of the dynamics engine.
Parameters
----------
max_step_size: Maximum time step size at which every system in
simulation can interact with the states of the world. (was
physics.sdf's dt).
real_time_factor: target simulation speedup factor, defined by ratio
of simulation time to real-time.
real_time_update_rate: Rate at which to update the physics engine
(UpdatePhysics calls per real-time second). (was physics.sdf's
update_rate).
max_contacts: Maximum number of contacts allowed between two
        entities. This value can be overridden by a max_contacts
element in a collision element.
gravity: The gravity vector in m/s^2, expressed in a coordinate
frame defined by the spherical_coordinates tag.
magnetic_field: The magnetic vector in Tesla, expressed in a
coordinate frame defined by the spherical_coordinates tag.
simbody: Simbody specific physics properties
bullet: Bullet specific physics properties
ode: ODE specific physics properties
name: The name of this set of physics parameters.
default: If true, this physics element is set as the default physics
profile for the world. If multiple default physics elements
exist, the first element marked as default is chosen. If no
default physics element exists, the first physics element is
chosen.
type: The type of the dynamics engine. Current options are ode,
bullet, simbody and rtql8. Defaults to ode if left unspecified.
"""
class Meta:
name = "physics"
max_step_size: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
real_time_factor: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
real_time_update_rate: float = field(
default=1000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=20,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
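    # the pattern below accepts a whitespace-separated vector of three signed floats, e.g. "0 0 -9.8"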
gravity: str = field(
default="0 0 -9.8",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
magnetic_field: str = field(
default="5.5645e-6 22.8758e-6 -42.3884e-6",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
simbody: Optional["Physics.Simbody"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Physics.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: str = field(
default="default_physics",
metadata={
"type": "Attribute",
},
)
default: bool = field(
default=False,
metadata={
"type": "Attribute",
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Simbody:
"""
Simbody specific physics properties.
Parameters
----------
min_step_size: (Currently not used in simbody) The time duration
which advances with each iteration of the dynamics engine,
this has to be no bigger than max_step_size under physics
block. If left unspecified, min_step_size defaults to
max_step_size.
accuracy: Roughly the relative error of the system.
-LOG(accuracy) is roughly the number of significant digits.
max_transient_velocity: Tolerable "slip" velocity allowed by the
solver when static friction is supposed to hold
object in place.
contact: Relationship among dissipation, coef. restitution, etc.
d = dissipation coefficient (1/velocity) vc =
capture velocity (velocity where e=e_max) vp =
            plastic velocity (smallest v where e=e_min) > vc
Assume real COR=1 when v=0. e_min = given minimum
            COR, at v >= vp (a.k.a. plastic_coef_restitution)
d = slope = (1-e_min)/vp OR, e_min = 1 - d*vp
e_max = maximum COR = 1-d*vc, reached at v=vc e = 0,
            v <= vc           = 1 - d*v,
            vc            < v < vp          = e_min,
            v >= vp     dissipation factor = d*min(v,vp)
[compliant] cor = e
[rigid] Combining rule e = 0,
e1==e2==0 = 2*e1*e2/(e1+e2),
otherwise
"""
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
accuracy: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_transient_velocity: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact: Optional["Physics.Simbody.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Contact:
"""Relationship among dissipation, coef.
            restitution, etc. d = dissipation coefficient (1/velocity) vc = capture velocity (velocity where e=e_max) vp = plastic velocity (smallest v where e=e_min) > vc Assume real COR=1 when v=0. e_min = given minimum COR, at v >= vp (a.k.a. plastic_coef_restitution) d = slope = (1-e_min)/vp OR, e_min = 1 - d*vp e_max = maximum COR = 1-d*vc, reached at v=vc e = 0, v <= vc = 1 - d*v, vc < v < vp = e_min, v >= vp dissipation factor = d*min(v,vp) [compliant] cor = e [rigid] Combining rule e = 0, e1==e2==0 = 2*e1*e2/(e1+e2), otherwise
Parameters
----------
stiffness: Default contact material stiffness
(force/dist or torque/radian).
dissipation: dissipation coefficient to be used in compliant
contact; if not given it is
(1-min_cor)/plastic_impact_velocity
plastic_coef_restitution: this is the COR to be used at high
velocities for rigid impacts; if not given it is 1 -
dissipation*plastic_impact_velocity
plastic_impact_velocity: smallest impact velocity at which
min COR is reached; set to zero if you want the
min COR always to be used
static_friction: static friction (mu_s) as described by this
plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
dynamic_friction: dynamic friction (mu_d) as described by
this plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
viscous_friction: viscous friction (mu_v) with units of
(1/velocity) as described by this plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
override_impact_capture_velocity: for rigid impacts only,
impact velocity at which COR is set to zero;
normally inherited from global default but can
be overridden here. Combining rule: use larger velocity
override_stiction_transition_velocity: This is the largest
slip velocity at which we'll consider a
transition to stiction. Normally inherited
from a global default setting. For a continuous friction
model this is the velocity at which the max
static friction force is reached. Combining
rule: use larger velocity
"""
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plastic_coef_restitution: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plastic_impact_velocity: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
static_friction: float = field(
default=0.9,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamic_friction: float = field(
default=0.9,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
viscous_friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
override_impact_capture_velocity: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
override_stiction_transition_velocity: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Bullet:
"""
Bullet specific physics properties.
Parameters
----------
solver:
constraints: Bullet constraint parameters.
"""
solver: Optional["Physics.Bullet.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constraints: Optional["Physics.Bullet.Constraints"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
type: One of the following types: sequential_impulse only.
min_step_size: The time duration which advances with each
iteration of the dynamics engine, this has to be no
bigger than max_step_size under physics block. If left
unspecified, min_step_size defaults to max_step_size.
iters: Number of iterations for each step. A higher number
produces greater accuracy at a performance cost.
sor: Set the successive over-relaxation parameter.
"""
type: str = field(
default="sequential_impulse",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iters: int = field(
default=50,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sor: float = field(
default=1.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Constraints:
"""
Bullet constraint parameters.
Parameters
----------
cfm: Constraint force mixing parameter. See the ODE page for
more information.
erp: Error reduction parameter. See the ODE page for more
information.
contact_surface_layer: The depth of the surface layer around
all geometry objects. Contacts are allowed to sink into
the surface layer up to the given depth before coming to
rest. The default value is zero. Increasing this to some
small value (e.g. 0.001) can help prevent jittering
problems due to contacts being repeatedly made and
broken.
split_impulse: Similar to ODE's max_vel implementation. See
http://web.archive.org/web/20120430155635/http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
split_impulse_penetration_threshold: Similar to ODE's
max_vel implementation. See
http://web.archive.org/web/20120430155635/http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_surface_layer: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse_penetration_threshold: float = field(
default=-0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE specific physics properties.
Parameters
----------
solver:
constraints: ODE constraint parameters.
"""
solver: Optional["Physics.Ode.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constraints: Optional["Physics.Ode.Constraints"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
type: One of the following types: world, quick
min_step_size: The time duration which advances with each
iteration of the dynamics engine, this has to be no
bigger than max_step_size under physics block. If left
unspecified, min_step_size defaults to max_step_size.
iters: Number of iterations for each step. A higher number
produces greater accuracy at a performance cost.
precon_iters: Experimental parameter.
sor: Set the successive over-relaxation parameter.
use_dynamic_moi_rescaling: Flag to enable dynamic rescaling
of moment of inertia in constrained directions.
See gazebo pull request 1114 for the implementation of
this feature. https://osrf-
migration.github.io/gazebo-gh-pages/#!/osrf/gazebo/pull-
request/1114
"""
type: str = field(
default="quick",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iters: int = field(
default=50,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
precon_iters: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sor: float = field(
default=1.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
use_dynamic_moi_rescaling: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Constraints:
"""
ODE constraint parameters.
Parameters
----------
cfm: Constraint force mixing parameter. See the ODE page for
more information.
erp: Error reduction parameter. See the ODE page for more
information.
contact_max_correcting_vel: The maximum correcting
velocities allowed when resolving contacts.
contact_surface_layer: The depth of the surface layer around
all geometry objects. Contacts are allowed to sink into
the surface layer up to the given depth before coming to
rest. The default value is zero. Increasing this to some
small value (e.g. 0.001) can help prevent jittering
problems due to contacts being repeatedly made and
broken.
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_max_correcting_vel: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_surface_layer: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
|
[
"dataclasses.field"
] |
[((1829, 1918), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (1834, 1918), False, 'from dataclasses import dataclass, field\n'), ((2015, 2102), 'dataclasses.field', 'field', ([], {'default': '(1.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=1.0, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (2020, 2102), False, 'from dataclasses import dataclass, field\n'), ((2204, 2294), 'dataclasses.field', 'field', ([], {'default': '(1000.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=1000.0, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (2209, 2294), False, 'from dataclasses import dataclass, field\n'), ((2385, 2471), 'dataclasses.field', 'field', ([], {'default': '(20)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=20, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (2390, 2471), False, 'from dataclasses import dataclass, field\n'), ((2557, 2816), 'dataclasses.field', 'field', ([], {'default': '"""0 0 -9.8"""', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True, 'pattern':\n '(\\\\s*(-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+)\\\\s+){2}((-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+))\\\\s*'\n }"}), "(default='0 0 -9.8', metadata={'type': 'Element', 'namespace': '',\n 'required': True, 'pattern':\n '(\\\\s*(-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+)\\\\s+){2}((-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+))\\\\s*'\n })\n", (2562, 2816), False, 'from dataclasses import dataclass, field\n'), ((2890, 3173), 'dataclasses.field', 'field', ([], {'default': '"""5.5645e-6 22.8758e-6 -42.3884e-6"""', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True, 'pattern':\n '(\\\\s*(-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+)\\\\s+){2}((-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+))\\\\s*'\n }"}), "(default='5.5645e-6 22.8758e-6 -42.3884e-6', metadata={'type':\n 'Element', 'namespace': '', 'required': True, 'pattern':\n '(\\\\s*(-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+)\\\\s+){2}((-|\\\\+)?(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+|\\\\d+\\\\.\\\\d+[eE][-\\\\+]?[0-9]+))\\\\s*'\n })\n", (2895, 3173), False, 'from dataclasses import dataclass, field\n'), ((3264, 3330), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': ''}"}), "(default=None, metadata={'type': 'Element', 'namespace': ''})\n", (3269, 3330), False, 'from dataclasses import dataclass, field\n'), ((3430, 3496), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': ''}"}), "(default=None, metadata={'type': 'Element', 'namespace': ''})\n", (3435, 3496), False, 'from dataclasses import dataclass, field\n'), ((3590, 3656), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': ''}"}), "(default=None, metadata={'type': 'Element', 'namespace': ''})\n", (3595, 3656), False, 'from dataclasses import dataclass, field\n'), ((3731, 3795), 'dataclasses.field', 'field', ([], {'default': 
'"""default_physics"""', 'metadata': "{'type': 'Attribute'}"}), "(default='default_physics', metadata={'type': 'Attribute'})\n", (3736, 3795), False, 'from dataclasses import dataclass, field\n'), ((3862, 3914), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'type': 'Attribute'}"}), "(default=False, metadata={'type': 'Attribute'})\n", (3867, 3914), False, 'from dataclasses import dataclass, field\n'), ((3987, 4056), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (3992, 4056), False, 'from dataclasses import dataclass, field\n'), ((5819, 5909), 'dataclasses.field', 'field', ([], {'default': '(0.0001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (5824, 5909), False, 'from dataclasses import dataclass, field\n'), ((6030, 6119), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (6035, 6119), False, 'from dataclasses import dataclass, field\n'), ((6254, 6342), 'dataclasses.field', 'field', ([], {'default': '(0.01)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.01, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (6259, 6342), False, 'from dataclasses import dataclass, field\n'), ((6492, 6558), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': ''}"}), "(default=None, metadata={'type': 'Element', 'namespace': ''})\n", (6497, 6558), False, 'from dataclasses import dataclass, field\n'), ((12084, 12172), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (12089, 12172), False, 'from dataclasses import dataclass, field\n'), ((12329, 12417), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (12334, 12417), False, 'from dataclasses import dataclass, field\n'), ((17009, 17097), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (17014, 17097), False, 'from dataclasses import dataclass, field\n'), ((17251, 17339), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (17256, 17339), False, 'from dataclasses import dataclass, field\n'), ((9569, 9664), 'dataclasses.field', 'field', ([], {'default': '(100000000.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=100000000.0, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (9574, 9664), False, 'from dataclasses import dataclass, field\n'), ((9820, 9909), 'dataclasses.field', 'field', ([], {'default': '(100.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=100.0, metadata={'type': 
'Element', 'namespace': '',\n 'required': True})\n", (9825, 9909), False, 'from dataclasses import dataclass, field\n'), ((10078, 10165), 'dataclasses.field', 'field', ([], {'default': '(0.5)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.5, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (10083, 10165), False, 'from dataclasses import dataclass, field\n'), ((10333, 10420), 'dataclasses.field', 'field', ([], {'default': '(0.5)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.5, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (10338, 10420), False, 'from dataclasses import dataclass, field\n'), ((10580, 10667), 'dataclasses.field', 'field', ([], {'default': '(0.9)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.9, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (10585, 10667), False, 'from dataclasses import dataclass, field\n'), ((10828, 10915), 'dataclasses.field', 'field', ([], {'default': '(0.9)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.9, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (10833, 10915), False, 'from dataclasses import dataclass, field\n'), ((11076, 11163), 'dataclasses.field', 'field', ([], {'default': '(0.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (11081, 11163), False, 'from dataclasses import dataclass, field\n'), ((11340, 11429), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (11345, 11429), False, 'from dataclasses import dataclass, field\n'), ((11611, 11700), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (11616, 11700), False, 'from dataclasses import dataclass, field\n'), ((13207, 13311), 'dataclasses.field', 'field', ([], {'default': '"""sequential_impulse"""', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default='sequential_impulse', metadata={'type': 'Element',\n 'namespace': '', 'required': True})\n", (13212, 13311), False, 'from dataclasses import dataclass, field\n'), ((13469, 13559), 'dataclasses.field', 'field', ([], {'default': '(0.0001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (13474, 13559), False, 'from dataclasses import dataclass, field\n'), ((13707, 13793), 'dataclasses.field', 'field', ([], {'default': '(50)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=50, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (13712, 13793), False, 'from dataclasses import dataclass, field\n'), ((13941, 14028), 'dataclasses.field', 'field', ([], {'default': '(1.3)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=1.3, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (13946, 14028), False, 'from dataclasses import dataclass, field\n'), ((15546, 15633), 'dataclasses.field', 'field', ([], {'default': '(0.0)', 
'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (15551, 15633), False, 'from dataclasses import dataclass, field\n'), ((15781, 15868), 'dataclasses.field', 'field', ([], {'default': '(0.2)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.2, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (15786, 15868), False, 'from dataclasses import dataclass, field\n'), ((16034, 16123), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (16039, 16123), False, 'from dataclasses import dataclass, field\n'), ((16280, 16368), 'dataclasses.field', 'field', ([], {'default': '(True)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=True, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (16285, 16368), False, 'from dataclasses import dataclass, field\n'), ((16548, 16637), 'dataclasses.field', 'field', ([], {'default': '(-0.01)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=-0.01, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (16553, 16637), False, 'from dataclasses import dataclass, field\n'), ((18530, 18621), 'dataclasses.field', 'field', ([], {'default': '"""quick"""', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default='quick', metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (18535, 18621), False, 'from dataclasses import dataclass, field\n'), ((18779, 18869), 'dataclasses.field', 'field', ([], {'default': '(0.0001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (18784, 18869), False, 'from dataclasses import dataclass, field\n'), ((19017, 19103), 'dataclasses.field', 'field', ([], {'default': '(50)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=50, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (19022, 19103), False, 'from dataclasses import dataclass, field\n'), ((19258, 19344), 'dataclasses.field', 'field', ([], {'default': '(0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0, metadata={'type': 'Element', 'namespace': '', 'required': \n True})\n", (19263, 19344), False, 'from dataclasses import dataclass, field\n'), ((19491, 19578), 'dataclasses.field', 'field', ([], {'default': '(1.3)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=1.3, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (19496, 19578), False, 'from dataclasses import dataclass, field\n'), ((19747, 19836), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=False, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (19752, 19836), False, 'from dataclasses import dataclass, field\n'), ((20929, 21016), 'dataclasses.field', 'field', ([], {'default': '(0.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.0, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (20934, 21016), False, 'from dataclasses import dataclass, 
field\n'), ((21164, 21251), 'dataclasses.field', 'field', ([], {'default': '(0.2)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.2, metadata={'type': 'Element', 'namespace': '', 'required':\n True})\n", (21169, 21251), False, 'from dataclasses import dataclass, field\n'), ((21422, 21511), 'dataclasses.field', 'field', ([], {'default': '(100.0)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=100.0, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (21427, 21511), False, 'from dataclasses import dataclass, field\n'), ((21677, 21766), 'dataclasses.field', 'field', ([], {'default': '(0.001)', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=0.001, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (21682, 21766), False, 'from dataclasses import dataclass, field\n')]
|
#!/usr/bin/env python
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import subprocess
# The location of the generate grammar kit script
DIR = os.path.dirname(__file__)
# The location of the plugin directory
PLUGIN_PATH = os.path.abspath(os.path.join(DIR, ".."))
# The location of the grammar-kit directory
GRAMMAR_KIT = os.path.abspath(
os.path.join(DIR, "../../../third-party/java/grammar-kit/")
)
OUT_DIR = os.path.join(PLUGIN_PATH, "gen")
FLEX_OUT_DIR = os.path.join(OUT_DIR, "com/facebook/buck/intellij/ideabuck/lang")
GRAMMAR_KIT_JAR = os.path.join(GRAMMAR_KIT, "grammar-kit.jar")
GRAMMAR_KIT_JFLEX_JAR = os.path.join(GRAMMAR_KIT, "JFlex.jar")
JFLEX_SKELETON = os.path.join(PLUGIN_PATH, "resources/idea-flex.skeleton")
FLEX_FILE = os.path.join(
PLUGIN_PATH, "src/com/facebook/buck/intellij/ideabuck/lang/Buck.flex"
)
BNF_FILE = os.path.join(
PLUGIN_PATH, "src/com/facebook/buck/intellij/ideabuck/lang/Buck.bnf"
)
def subprocess_call(cmd):
print("Running: %s" % (" ".join(cmd)))
subprocess.call(cmd)
shutil.rmtree(OUT_DIR, ignore_errors=True)
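# generate the parser sources from the BNF grammar with Grammar-Kit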
subprocess_call(["java", "-jar", GRAMMAR_KIT_JAR, OUT_DIR, BNF_FILE])
subprocess_call(
[
"java",
"-jar",
GRAMMAR_KIT_JFLEX_JAR,
"-sliceandcharat",
"-skel",
JFLEX_SKELETON,
"-d",
FLEX_OUT_DIR,
FLEX_FILE,
]
)
|
[
"os.path.dirname",
"os.path.join",
"subprocess.call",
"shutil.rmtree"
] |
[((793, 818), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (808, 818), False, 'import os\n'), ((1066, 1098), 'os.path.join', 'os.path.join', (['PLUGIN_PATH', '"""gen"""'], {}), "(PLUGIN_PATH, 'gen')\n", (1078, 1098), False, 'import os\n'), ((1114, 1179), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""com/facebook/buck/intellij/ideabuck/lang"""'], {}), "(OUT_DIR, 'com/facebook/buck/intellij/ideabuck/lang')\n", (1126, 1179), False, 'import os\n'), ((1199, 1243), 'os.path.join', 'os.path.join', (['GRAMMAR_KIT', '"""grammar-kit.jar"""'], {}), "(GRAMMAR_KIT, 'grammar-kit.jar')\n", (1211, 1243), False, 'import os\n'), ((1268, 1306), 'os.path.join', 'os.path.join', (['GRAMMAR_KIT', '"""JFlex.jar"""'], {}), "(GRAMMAR_KIT, 'JFlex.jar')\n", (1280, 1306), False, 'import os\n'), ((1325, 1382), 'os.path.join', 'os.path.join', (['PLUGIN_PATH', '"""resources/idea-flex.skeleton"""'], {}), "(PLUGIN_PATH, 'resources/idea-flex.skeleton')\n", (1337, 1382), False, 'import os\n'), ((1395, 1482), 'os.path.join', 'os.path.join', (['PLUGIN_PATH', '"""src/com/facebook/buck/intellij/ideabuck/lang/Buck.flex"""'], {}), "(PLUGIN_PATH,\n 'src/com/facebook/buck/intellij/ideabuck/lang/Buck.flex')\n", (1407, 1482), False, 'import os\n'), ((1496, 1582), 'os.path.join', 'os.path.join', (['PLUGIN_PATH', '"""src/com/facebook/buck/intellij/ideabuck/lang/Buck.bnf"""'], {}), "(PLUGIN_PATH,\n 'src/com/facebook/buck/intellij/ideabuck/lang/Buck.bnf')\n", (1508, 1582), False, 'import os\n'), ((1683, 1725), 'shutil.rmtree', 'shutil.rmtree', (['OUT_DIR'], {'ignore_errors': '(True)'}), '(OUT_DIR, ignore_errors=True)\n', (1696, 1725), False, 'import shutil\n'), ((889, 912), 'os.path.join', 'os.path.join', (['DIR', '""".."""'], {}), "(DIR, '..')\n", (901, 912), False, 'import os\n'), ((993, 1052), 'os.path.join', 'os.path.join', (['DIR', '"""../../../third-party/java/grammar-kit/"""'], {}), "(DIR, '../../../third-party/java/grammar-kit/')\n", (1005, 1052), False, 'import os\n'), ((1660, 1680), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (1675, 1680), False, 'import subprocess\n')]
|
from lxml import html
from d_parser.d_spider_common import DSpiderCommon
from d_parser.helpers.re_set import Ree
from helpers.url_generator import UrlGenerator
from d_parser.helpers.stat_counter import StatCounter as SC
VERSION = 29
# Warning: don't remove the task argument even if it is unused (removing it breaks grab and crashes the spider)
# Warn: noinspection PyUnusedLocal
class DSpider(DSpiderCommon):
def __init__(self, thread_number, try_limit=0):
super().__init__(thread_number, try_limit)
# parse categories
def task_initial(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
return
links = grab.doc.select('//div[@id="main-subitems"]//a')
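            # append a very large 'onpage' value to each category URL, presumably so every item is listed on one page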
for link in links:
url = UrlGenerator.get_page_params(self.domain, link.attr('href'), {'onpage': 99999})
yield self.do_task('parse_page', url, DSpider.get_next_task_priority(task))
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
# parse page
def task_parse_page(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
# parse items links
items_links = grab.doc.select('//div[@id="catalog-list"]//div[@class="catalog-items"]//a[@property="name"]')
for row in items_links:
link = row.attr('href')
link = UrlGenerator.get_page_params(self.domain, link, {})
yield self.do_task('parse_item', link, DSpider.get_next_task_priority(task))
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
# parse single item
def task_parse_item(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
# common block with info
product_info = grab.doc.select('//div[@id="product-info"]')
# parse fields
# A = name
product_name = product_info.select('.//h1').text()
# B = [const]
# C = [const]
# D = [const]
product_count_string = product_info.select('.//div[@class="product-data-storehouse"]').text(default='[not found]')
product_count = '-1'
product_status = '0'
product_unit = 'ед.'
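            # the shop reports availability in Russian: 'в наличии' means 'in stock'; 'Уточняйте' (checked below for the price) means 'ask for the price'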
if product_count_string != 'в наличии':
self.log.warning(task, 'Skip item, cuz wrong count {}'.format(product_count_string))
return
# E = price
# if E = "запросить цену и наличие" => zapros
# else => float
product_price = product_info.select('.//span[@itemprop="price"]').text().replace(' ', '')
if product_price == 'Уточняйте':
product_price = '-1'
else:
# E = price (float)
# check if correct price
if not Ree.float.match(product_price):
self.log_warn(SC.MSG_UNKNOWN_PRICE, f'Skip item, cuz wrong price {product_price}', task)
return
# F = vendor code
product_vendor_code = product_info.select('.//div[@class="product-data-articul"]').text()
# G = vendor
product_vendor = product_info.select('.//div[@class="product-data-producer"]').text()
# H = photo url
product_photo_url_raw = product_info.select('.//div[@id="product-images-list"]/div[1]/img[@itemprop="contentUrl"]').attr('src')
product_photo_url = UrlGenerator.get_page_params(self.domain, product_photo_url_raw, {})
# pre I
product_description_part_raw = product_info.select('.//div[@class="product-description description"]/following-sibling::node()[2]')\
.text(default='')\
.replace('$(".description").html(\'', '')\
.replace('\');', '')
# I = description
# this part insert pure html with js, so we need clear all html tags and &-symbols
product_description_part_list = html.fromstring(f'<div>{product_description_part_raw}</div>').xpath('string()')
product_description_part = ''
for row in product_description_part_list:
product_description_part += row
product_description = {'Описание': product_description_part}
table = product_info.select('.//div[@class="product-description table"]/div')
for row in table:
key = row.select('./text()').text()
value = row.select('./span').text()
if key:
product_description[key] = value
# ID
product_id = product_info.select('.//div[@class="product-add-but"]').attr('data-id', '')
# save
self.result.add({
'name': product_name,
'quantity': product_count,
'delivery': product_status,
'measure': product_unit,
'price': product_price,
'sku': product_vendor_code,
'manufacture': product_vendor,
'photo': product_photo_url,
'id': product_id,
'properties': product_description
})
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
|
[
"helpers.url_generator.UrlGenerator.get_page_params",
"d_parser.helpers.re_set.Ree.float.match",
"lxml.html.fromstring"
] |
[((3756, 3824), 'helpers.url_generator.UrlGenerator.get_page_params', 'UrlGenerator.get_page_params', (['self.domain', 'product_photo_url_raw', '{}'], {}), '(self.domain, product_photo_url_raw, {})\n', (3784, 3824), False, 'from helpers.url_generator import UrlGenerator\n'), ((1550, 1601), 'helpers.url_generator.UrlGenerator.get_page_params', 'UrlGenerator.get_page_params', (['self.domain', 'link', '{}'], {}), '(self.domain, link, {})\n', (1578, 1601), False, 'from helpers.url_generator import UrlGenerator\n'), ((3130, 3160), 'd_parser.helpers.re_set.Ree.float.match', 'Ree.float.match', (['product_price'], {}), '(product_price)\n', (3145, 3160), False, 'from d_parser.helpers.re_set import Ree\n'), ((4292, 4353), 'lxml.html.fromstring', 'html.fromstring', (['f"""<div>{product_description_part_raw}</div>"""'], {}), "(f'<div>{product_description_part_raw}</div>')\n", (4307, 4353), False, 'from lxml import html\n')]
|
#!/usr/bin/python3
import logging
import argparse
from time import time
import toml
from data.io.knowledge_graph import KnowledgeGraph
from data.io.tarball import Tarball
from data.io.tsv import TSV
from data.utils import is_readable, is_writable
from embeddings import graph_structure
from tasks.node_classification import build_dataset, build_model, evaluate_model
from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph
def single_run(A, X, Y, X_node_map, tsv_writer, config):
tsv_writer.writerow(["epoch", "training_loss", "training_accurary",
"validation_loss", "validation_accuracy",
"test_loss", "test_accuracy"])
# create splits
dataset = mksplits(X, Y, X_node_map,
config['task']['dataset_ratio'])
# compile model computation graph
model = build_model(X, Y, A, config)
# train model
nepoch = config['model']['epoch']
batch_size = X.shape[0] # number of nodes
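    # 0/1 mask over all nodes so that only training nodes contribute to the loss (training is full-batch)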
sample_weights = sample_mask(dataset['train']['X_idx'],
Y.shape[0])
for epoch in train_model(A, model, dataset, sample_weights, batch_size, nepoch):
# log metrics
tsv_writer.writerow([str(epoch[0]),
str(epoch[1]),
str(epoch[2]),
str(epoch[3]),
str(epoch[4]),
"-1", "-1"])
# test model
test_loss, test_acc = test_model(A, model, dataset, batch_size)
# log metrics
tsv_writer.writerow(["-1", "-1", "-1", "-1", "-1",
str(test_loss[0]), str(test_acc[0])])
return (test_loss[0], test_acc[0])
def kfold_crossvalidation(A, X, Y, X_node_map, k, tsv_writer, config):
tsv_writer.writerow(["fold", "epoch",
"training_loss", "training_accurary",
"validation_loss", "validation_accuracy",
"test_loss", "test_accuracy"])
# generate fold indices
folds_idx = mkfolds(X_node_map.shape[0], k)
results = []
logger.info("Starting {}-fold cross validation".format(k))
for fold in range(1, k+1):
logger.info("Fold {} / {}".format(fold, k))
# compile model computation graph
model = build_model(X, Y, A, config)
# initialize fold
dataset = init_fold(X, Y, X_node_map, folds_idx[fold-1],
config['task']['dataset_ratio'])
# train model
nepoch = config['model']['epoch']
batch_size = X.shape[0] # number of nodes
sample_weights = sample_mask(dataset['train']['X_idx'],
Y.shape[0])
for epoch in train_model(A, model, dataset, sample_weights, batch_size, nepoch):
# log metrics
tsv_writer.writerow([str(fold),
str(epoch[0]),
str(epoch[1]),
str(epoch[2]),
str(epoch[3]),
str(epoch[4]),
"-1", "-1"])
# test model
test_loss, test_acc = test_model(A, model, dataset, batch_size)
results.append((test_loss[0], test_acc[0]))
# log metrics
tsv_writer.writerow([str(fold),
"-1", "-1", "-1", "-1", "-1",
str(test_loss[0]), str(test_acc[0])])
mean_loss, mean_acc = tuple(sum(e)/len(e) for e in zip(*results))
tsv_writer.writerow(["-1", "-1", "-1", "-1", "-1", "-1",
str(mean_loss), str(mean_acc)])
return (mean_loss, mean_acc)
def train_model(A, model, dataset, sample_weights, batch_size, nepoch):
logging.info("Training for {} epoch".format(nepoch))
# Log wall-clock time
t0 = time()
for epoch in range(1, nepoch+1):
# Single training iteration
model.fit(x=[dataset['train']['X']] + A,
y=dataset['train']['Y'],
batch_size=batch_size,
epochs=1,
shuffle=False,
sample_weight=sample_weights,
validation_data=([dataset['val']['X']] + A,
dataset['val']['Y']),
callbacks=[],
verbose=0)
# Predict on full dataset
Y_hat = model.predict(x=[dataset['train']['X']] + A,
batch_size=batch_size,
verbose=0)
# Train / validation scores
train_val_loss, train_val_acc = evaluate_model(Y_hat,
[dataset['train']['Y'],
dataset['val']['Y']],
[dataset['train']['X_idx'],
dataset['val']['X_idx']])
logging.info("{:04d} ".format(epoch) \
+ "| train loss {:.4f} / acc {:.4f} ".format(train_val_loss[0],
train_val_acc[0])
+ "| val loss {:.4f} / acc {:.4f}".format(train_val_loss[1],
train_val_acc[1]))
yield (epoch,
train_val_loss[0], train_val_acc[0],
train_val_loss[1], train_val_acc[1])
logging.info("training time: {:.2f}s".format(time()-t0))
def test_model(A, model, dataset, batch_size):
# Predict on full dataset
Y_hat = model.predict(x=[dataset['train']['X']] + A,
batch_size=batch_size,
verbose=0)
test_loss, test_acc = evaluate_model(Y_hat,
[dataset['test']['Y']],
[dataset['test']['X_idx']])
logging.info("Performance on test set: loss {:.4f} / accuracy {:.4f}".format(
test_loss[0],
test_acc[0]))
return (test_loss, test_acc)
def run(args, tsv_writer, config):
set_seed(config['task']['seed'])
# prep data
if args.input is None:
logging.debug("No tarball supplied - building task prequisites")
with KnowledgeGraph(path=config['graph']['file']) as kg:
targets = strip_graph(kg, config)
A = graph_structure.generate(kg, config)
X, Y, X_node_map = build_dataset(kg, targets, config)
else:
assert is_readable(args.input)
logging.debug("Importing prepared tarball")
with Tarball(args.input, 'r') as tb:
A = tb.get('A')
X = tb.get('X')
Y = tb.get('Y')
X_node_map = tb.get('X_node_map')
if config['task']['kfolds'] < 0:
loss, accuracy = single_run(A, X, Y, X_node_map, tsv_writer, config)
else:
loss, accuracy = kfold_crossvalidation(A, X, Y, X_node_map,
config['task']['kfolds'],
tsv_writer, config)
logging.info("Mean performance: loss {:.4f} / accuracy {:.4f}".format(
loss,
accuracy))
if args.verbose < 1:
print("Mean performance: loss {:.4f} / accuracy {:.4f}".format(
loss,
accuracy))
def init_logger(filename, verbose=0):
logging.basicConfig(filename=filename,
format='[%(asctime)s] %(module)s/%(funcName)s | %(levelname)s: %(message)s',
level=logging.DEBUG)
if verbose > 0:
stream_handler = logging.StreamHandler()
level = logging.INFO
if verbose >= 2:
level = logging.DEBUG
stream_handler.setLevel(level)
logging.getLogger().addHandler(stream_handler)
if __name__ == "__main__":
timestamp = int(time())
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="Configuration file (toml)", required=True, default=None)
parser.add_argument("-i", "--input", help="Optional prepared input file (tar)", default=None)
parser.add_argument("-o", "--output", help="Output directory", default="/tmp/")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action='count', default=0)
args = parser.parse_args()
# load configuration
assert is_readable(args.config)
config = toml.load(args.config)
# set output base filename
baseFilename = "{}{}{}".format(args.output, config['name'], timestamp) if args.output.endswith("/") \
else "{}/{}{}".format(args.output, config['name'], timestamp)
assert is_writable(baseFilename)
init_logger(baseFilename+'.log', args.verbose)
logger = logging.getLogger(__name__)
tsv_writer = TSV(baseFilename+'.tsv', 'w')
# log parameters
logger.debug("Arguments:\n{}".format(
"\n".join(["\t{}: {}".format(arg, getattr(args, arg)) for arg in vars(args)])))
logger.debug("Configuration:\n{}".format(
"\n".join(["\t{}: {}".format(k,v) for k,v in config.items()])))
# run training
run(args, tsv_writer, config)
logging.shutdown()
|
[
"logging.getLogger",
"logging.StreamHandler",
"tasks.utils.mksplits",
"tasks.utils.init_fold",
"logging.debug",
"embeddings.graph_structure.generate",
"data.io.knowledge_graph.KnowledgeGraph",
"tasks.utils.strip_graph",
"data.io.tarball.Tarball",
"tasks.node_classification.build_model",
"argparse.ArgumentParser",
"toml.load",
"tasks.utils.sample_mask",
"tasks.utils.mkfolds",
"tasks.node_classification.build_dataset",
"data.utils.is_readable",
"data.utils.is_writable",
"time.time",
"logging.basicConfig",
"tasks.utils.set_seed",
"data.io.tsv.TSV",
"logging.shutdown",
"tasks.node_classification.evaluate_model"
] |
[((766, 825), 'tasks.utils.mksplits', 'mksplits', (['X', 'Y', 'X_node_map', "config['task']['dataset_ratio']"], {}), "(X, Y, X_node_map, config['task']['dataset_ratio'])\n", (774, 825), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((906, 934), 'tasks.node_classification.build_model', 'build_model', (['X', 'Y', 'A', 'config'], {}), '(X, Y, A, config)\n', (917, 934), False, 'from tasks.node_classification import build_dataset, build_model, evaluate_model\n'), ((1064, 1114), 'tasks.utils.sample_mask', 'sample_mask', (["dataset['train']['X_idx']", 'Y.shape[0]'], {}), "(dataset['train']['X_idx'], Y.shape[0])\n", (1075, 1114), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((2140, 2171), 'tasks.utils.mkfolds', 'mkfolds', (['X_node_map.shape[0]', 'k'], {}), '(X_node_map.shape[0], k)\n', (2147, 2171), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((4000, 4006), 'time.time', 'time', ([], {}), '()\n', (4004, 4006), False, 'from time import time\n'), ((5946, 6019), 'tasks.node_classification.evaluate_model', 'evaluate_model', (['Y_hat', "[dataset['test']['Y']]", "[dataset['test']['X_idx']]"], {}), "(Y_hat, [dataset['test']['Y']], [dataset['test']['X_idx']])\n", (5960, 6019), False, 'from tasks.node_classification import build_dataset, build_model, evaluate_model\n'), ((6329, 6361), 'tasks.utils.set_seed', 'set_seed', (["config['task']['seed']"], {}), "(config['task']['seed'])\n", (6337, 6361), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((7644, 7789), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'filename', 'format': '"""[%(asctime)s] %(module)s/%(funcName)s | %(levelname)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(filename=filename, format=\n '[%(asctime)s] %(module)s/%(funcName)s | %(levelname)s: %(message)s',\n level=logging.DEBUG)\n", (7663, 7789), False, 'import logging\n'), ((8161, 8186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8184, 8186), False, 'import argparse\n'), ((8646, 8670), 'data.utils.is_readable', 'is_readable', (['args.config'], {}), '(args.config)\n', (8657, 8670), False, 'from data.utils import is_readable, is_writable\n'), ((8684, 8706), 'toml.load', 'toml.load', (['args.config'], {}), '(args.config)\n', (8693, 8706), False, 'import toml\n'), ((8938, 8963), 'data.utils.is_writable', 'is_writable', (['baseFilename'], {}), '(baseFilename)\n', (8949, 8963), False, 'from data.utils import is_readable, is_writable\n'), ((9029, 9056), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9046, 9056), False, 'import logging\n'), ((9079, 9110), 'data.io.tsv.TSV', 'TSV', (["(baseFilename + '.tsv')", '"""w"""'], {}), "(baseFilename + '.tsv', 'w')\n", (9082, 9110), False, 'from data.io.tsv import TSV\n'), ((9442, 9460), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (9458, 9460), False, 'import logging\n'), ((2395, 2423), 'tasks.node_classification.build_model', 'build_model', (['X', 'Y', 'A', 'config'], {}), '(X, Y, A, config)\n', (2406, 2423), False, 'from tasks.node_classification import build_dataset, build_model, evaluate_model\n'), ((2478, 2564), 'tasks.utils.init_fold', 'init_fold', (['X', 'Y', 'X_node_map', 'folds_idx[fold - 1]', "config['task']['dataset_ratio']"], {}), "(X, Y, X_node_map, folds_idx[fold - 1], config['task'][\n 'dataset_ratio'])\n", (2487, 2564), 
False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((2727, 2777), 'tasks.utils.sample_mask', 'sample_mask', (["dataset['train']['X_idx']", 'Y.shape[0]'], {}), "(dataset['train']['X_idx'], Y.shape[0])\n", (2738, 2777), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((4773, 4899), 'tasks.node_classification.evaluate_model', 'evaluate_model', (['Y_hat', "[dataset['train']['Y'], dataset['val']['Y']]", "[dataset['train']['X_idx'], dataset['val']['X_idx']]"], {}), "(Y_hat, [dataset['train']['Y'], dataset['val']['Y']], [\n dataset['train']['X_idx'], dataset['val']['X_idx']])\n", (4787, 4899), False, 'from tasks.node_classification import build_dataset, build_model, evaluate_model\n'), ((6414, 6478), 'logging.debug', 'logging.debug', (['"""No tarball supplied - building task prequisites"""'], {}), "('No tarball supplied - building task prequisites')\n", (6427, 6478), False, 'import logging\n'), ((6734, 6757), 'data.utils.is_readable', 'is_readable', (['args.input'], {}), '(args.input)\n', (6745, 6757), False, 'from data.utils import is_readable, is_writable\n'), ((6766, 6809), 'logging.debug', 'logging.debug', (['"""Importing prepared tarball"""'], {}), "('Importing prepared tarball')\n", (6779, 6809), False, 'import logging\n'), ((7875, 7898), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7896, 7898), False, 'import logging\n'), ((8139, 8145), 'time.time', 'time', ([], {}), '()\n', (8143, 8145), False, 'from time import time\n'), ((6492, 6536), 'data.io.knowledge_graph.KnowledgeGraph', 'KnowledgeGraph', ([], {'path': "config['graph']['file']"}), "(path=config['graph']['file'])\n", (6506, 6536), False, 'from data.io.knowledge_graph import KnowledgeGraph\n'), ((6566, 6589), 'tasks.utils.strip_graph', 'strip_graph', (['kg', 'config'], {}), '(kg, config)\n', (6577, 6589), False, 'from tasks.utils import mksplits, init_fold, mkfolds, sample_mask, set_seed, strip_graph\n'), ((6606, 6642), 'embeddings.graph_structure.generate', 'graph_structure.generate', (['kg', 'config'], {}), '(kg, config)\n', (6630, 6642), False, 'from embeddings import graph_structure\n'), ((6674, 6708), 'tasks.node_classification.build_dataset', 'build_dataset', (['kg', 'targets', 'config'], {}), '(kg, targets, config)\n', (6687, 6708), False, 'from tasks.node_classification import build_dataset, build_model, evaluate_model\n'), ((6823, 6847), 'data.io.tarball.Tarball', 'Tarball', (['args.input', '"""r"""'], {}), "(args.input, 'r')\n", (6830, 6847), False, 'from data.io.tarball import Tarball\n'), ((5685, 5691), 'time.time', 'time', ([], {}), '()\n', (5689, 5691), False, 'from time import time\n'), ((8044, 8063), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8061, 8063), False, 'import logging\n')]
|
import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing
from actn import *
ap = argparse.ArgumentParser()
# for help -> python alpha.py --help
ap.add_argument("-f", "--file", required=True,
help="name of the file")
ap.add_argument("-o", "--output", required=True,
help="specifiy the folder path of output")
ap.add_argument("-b", "--before", required=True,
help="seconds to cut before", type=int)
ap.add_argument("-a", "--after", required=True,
help="seconds to cut after", type=int)
args = vars(ap.parse_args())
class core_overwatch():
def __init__(self, file_name, output_folder, before, after):
self.file_name = file_name
self.output_folder = output_folder
self.before = before
self.after = after
if not os.path.exists(str(self.output_folder)):
print("The File Path Doesn't Exist!")
print("[++++++]Creating the Folder in Path {0}".format(output_folder))
os.makedirs("{0}".format(self.output_folder))
print("[++++++]Finished Making The Folder in Path {0}".format(output_folder))
try:
fh = open('{0}'.format(self.file_name), 'r')
except FileNotFoundError:
print("[+++++++]The Video File Not Found In Path.Please Try Again")
cmd1 = "ffmpeg -i {0} 2>&1 | sed -n \"s/.*, \(.*\) fp.*/\\1/p\"".format(self.file_name)
os.system(cmd1 + ' > tmp1')
self.fps = int(open('tmp1', 'r').read())
os.system(
""" ffprobe -v error -select_streams v:0 -show_entries stream=nb_frames -of default=nokey=1:noprint_wrappers=1 {0} > tmp2 """.format(
self.file_name
))
self.frame_count = int(open('tmp2', 'r').read())
print('[++++++]fps', self.fps)
print('[++++++]frame count', self.frame_count)
        # fps and total frame count gathered above via ffmpeg / ffprobe
def build_folder(self):
folder_names = ['./raw_calc', './raw_calc/frame_db_temp']
for directory in folder_names:
if not os.path.exists(str(directory)):
os.makedirs(str(directory))
# if exists then delete all the files in that dir tree
def which_frame_formula(self):
second_length = 1
chunk_size = round(self.fps * second_length) # fps*second_length
assert type(chunk_size) is int, "Chunk Size must have to be Integer Type"
# upto which frame the ops will execute(for loop to extract one frame from chunk size )
n = round(round(self.frame_count) / chunk_size)
start_frame = round(self.fps / 2)
common_diff = round(self.fps * second_length) # * second length,taking 1F/60
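        # e.g. with fps=30 and second_length=1 this samples frames 15, 45, 75, ... (one frame per second)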
return start_frame, n, common_diff
def select_frame(self, a, n, d):
# arithmetic series y=a+(p-1)*d
which_frame_list = [a + (p - 1) * d for p in range(1, n + 1)]
return which_frame_list
def read_save_frame(self):
os.system("ffmpeg -hide_banner -loglevel panic -i {video_Fname} -vf fps=1 {f_name}/%d.png".format(
f_name='./raw_calc/frame_db_temp', video_Fname=str(self.file_name)
))
def get_action_process_multithreaded_cmd_run_commands(self):
img_list = ['./raw_calc/frame_db_temp/{0}'.format(x) for x in os.listdir('./raw_calc/frame_db_temp')]
img_list.sort(key=lambda fx: int(''.join(filter(str.isdigit, fx))))
az = return_text(img_list)
return az
# utils function start here -3
def _dbl(self, time):
if time < 10:
return '0' + str(time)
else:
return str(time)
def time_cut(self, input_in_sec):
times = []
hours = 0
minutes = 0
seconds = 0
hours = input_in_sec // 3600
minutes = (input_in_sec % 3600) // 60
seconds = (input_in_sec % 3600) % 60
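        # e.g. an input of 3665 seconds becomes '01:01:05'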
return "{}:{}:{}".format(core_overwatch._dbl(self, hours), core_overwatch._dbl(self, minutes),
core_overwatch._dbl(self, seconds))
def findIndices(self, sequence, _str, extra=0): # 0011
assert len(sequence) < len(_str), "Sequence is Greater Than the Main String"
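        # e.g. findIndices('01', '0011', extra=1) returns [2]; `extra` shifts the reported index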
indices = []
for i in range(len(_str) - len(sequence) + 1):
temp = _str[i:i + len(sequence)]
if (sequence == temp):
indices.append(i + 2 - extra)
return indices
# utils fx ends here
def action_index_find(self, raw_list, which_frame):
raw_str_hashed = ''
for j in raw_list:
raw_str_hashed += str(j)
assert type(raw_str_hashed) is str, " The parameter to find Indices Type must have to be a String"
result_list = core_overwatch.findIndices(self, '01', raw_str_hashed, extra=1)
final_result = []
for yx in result_list:
final_result.append(int(which_frame[yx]))
return final_result
def build_frame_range_to_cut(self, action_result):
# print(action_result)
# input will be taken ->cp from raw code
frames = round(self.frame_count)
fps = round(self.fps)
bef = int(self.before) * fps # count frm
aft = int(self.after) * fps
# frame range (tuple ds) contained list
frame_range = []
# build condition for after and before trimming
for ucv in action_result:
if int(ucv) < bef and aft < frames:
frame_range.append((0, ucv + aft))
elif int(ucv) < bef and aft > frames:
frame_range.append((0, frames))
elif int(ucv) > bef and aft < frames:
frame_range.append((ucv - bef, ucv + aft))
            elif int(ucv) > bef and aft > frames:
frame_range.append((ucv - bef, frames))
# (temp) test
return frame_range
def build_output(self, start, end, video_name, file_name, end1):
os.system(
'ffmpeg -hide_banner -loglevel panic -ss {st} -i {ivfname} -to {ed} -c copy {ovfname}'.format(st=start,
ed=end1,
ivfname=self.file_name,
ovfname=video_name))
file_ = open('{}'.format(file_name), 'w')
file_.write('Start at : {sec} \n End at : {sec1} '.format(sec=start, sec1=end))
file_.close()
def send_frame_signal(self, frame_range):
# frame range is like [(0,21),(4,198)]
assert type(frame_range) is list, "Frame range must have to be a list"
fps = round(self.fps)
# build video file path name
ax = str(datetime.datetime.now())
tm = ax[0:10] + '_' + ax[11:]
file_n_ = str(self.output_folder + '/' + str(tm))
os.makedirs(file_n_)
video_type = os.path.splitext(os.path.basename(str(self.file_name)))[1] # output e.g as .mp4
for ux in range(len(frame_range)):
start = core_overwatch.time_cut(self, input_in_sec=math.ceil(frame_range[ux][0] / fps))
end = core_overwatch.time_cut(self, input_in_sec=math.ceil(frame_range[ux][1] / fps))
end1 = core_overwatch.time_cut(self, input_in_sec=math.ceil(
(frame_range[ux][1] / fps) - (frame_range[ux][0] / fps)))
print('[++++++]Start at {0} End at {1}'.format(start, end))
core_overwatch.build_output(self, start=str(start),
end=str(end),
video_name=file_n_ + '/output{vid_number}{type_v}'.format(vid_number=ux,
type_v=video_type),
file_name=file_n_ + '/output{0}.txt'.format(ux),
end1=end1
)
print("Total {0} Videos have been cut from Main Video".format(len(os.listdir(file_n_))/2))
if __name__ == "__main__":
a = core_overwatch(file_name=str(args['file']), output_folder=str(args['output']), before=int(args['before']),
after=int(args['after']))
a.build_folder()
start_frame, n, common_diff = a.which_frame_formula() # returns a,n,d
c = a.select_frame(start_frame, n, common_diff) # returns which_frame_list
st = time.time()
print("[+++++]Reading Frames....")
a.read_save_frame()
print("[+++++++]Finished Reading Frames")
print("[+++++++]Image Processing Rolling....")
d = a.get_action_process_multithreaded_cmd_run_commands()
print("[++++++++]Finished Processing Images")
f = a.action_index_find(raw_list=d, which_frame=c) # return list to start aft and bef(action first observed)
g = a.build_frame_range_to_cut(f)
a.send_frame_signal(frame_range=g)
print('[++++++]Time req to run The Engine is {0}m'.format((time.time() - st) / 60))
print('Deleting temp folders..')
shutil.rmtree('./raw_calc/frame_db_temp')
os.remove('./tmp1')
os.remove('./tmp2')
|
[
"os.listdir",
"math.ceil",
"argparse.ArgumentParser",
"os.makedirs",
"datetime.datetime.now",
"shutil.rmtree",
"os.system",
"time.time",
"os.remove"
] |
[((117, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (140, 142), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((8644, 8655), 'time.time', 'time.time', ([], {}), '()\n', (8653, 8655), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((9253, 9294), 'shutil.rmtree', 'shutil.rmtree', (['"""./raw_calc/frame_db_temp"""'], {}), "('./raw_calc/frame_db_temp')\n", (9266, 9294), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((9299, 9318), 'os.remove', 'os.remove', (['"""./tmp1"""'], {}), "('./tmp1')\n", (9308, 9318), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((9323, 9342), 'os.remove', 'os.remove', (['"""./tmp2"""'], {}), "('./tmp2')\n", (9332, 9342), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((1473, 1501), 'os.system', 'os.system', (["(cmd1 + ' > tmp1')"], {}), "(cmd1 + ' > tmp1')\n", (1482, 1501), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((7046, 7066), 'os.makedirs', 'os.makedirs', (['file_n_'], {}), '(file_n_)\n', (7057, 7066), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((6915, 6938), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6936, 6938), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((3339, 3377), 'os.listdir', 'os.listdir', (['"""./raw_calc/frame_db_temp"""'], {}), "('./raw_calc/frame_db_temp')\n", (3349, 3377), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((7277, 7312), 'math.ceil', 'math.ceil', (['(frame_range[ux][0] / fps)'], {}), '(frame_range[ux][0] / fps)\n', (7286, 7312), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((7375, 7410), 'math.ceil', 'math.ceil', (['(frame_range[ux][1] / fps)'], {}), '(frame_range[ux][1] / fps)\n', (7384, 7410), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((7474, 7536), 'math.ceil', 'math.ceil', (['(frame_range[ux][1] / fps - frame_range[ux][0] / fps)'], {}), '(frame_range[ux][1] / fps - frame_range[ux][0] / fps)\n', (7483, 7536), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((9186, 9197), 'time.time', 'time.time', ([], {}), '()\n', (9195, 9197), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n'), ((8238, 8257), 'os.listdir', 'os.listdir', (['file_n_'], {}), '(file_n_)\n', (8248, 8257), False, 'import argparse, time, os, cv2, shutil, datetime, math, subprocess, pickle, multiprocessing\n')]
|
from dcim.choices import DeviceStatusChoices
from dcim.models import Device
from extras.reports import Report
class DeviceIPReport(Report):
description = (
"Check that every device has either an IPv4 or IPv6 primary address assigned"
)
def test_primary_ip4(self):
for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE):
intcount = 0
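            # count non-management interfaces on the device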
for interface in device.interfaces.all():
if not interface.mgmt_only:
intcount += 1
# There may be dumb devices with no interfaces so no IP addresses, that's OK
if intcount == 0:
if device.primary_ip4_id is not None:
if device.primary_ip6_id is not None:
self.log_failure(
device,
"Device has primary IPv4 and IPv6 address but no interfaces",
)
else:
self.log_warning(
device,
"Device has missing primary IPv4 addresses but no interfaces",
)
else:
self.log_success(device)
elif device.primary_ip4_id is None:
if device.device_type.is_child_device is True:
self.log_success(device)
else:
if device.primary_ip6_id is None:
self.log_failure(
device, "Device is missing primary IPv4 and IPv6 address"
)
else:
self.log_warning(
device, "Device is missing primary IPv4 addresses"
)
else:
if device.device_type.is_child_device is True:
self.log_success(device)
else:
if device.primary_ip6_id is None:
self.log_info(device, "Device is missing primary IPv6 address")
else:
self.log_success(device)
|
[
"dcim.models.Device.objects.filter"
] |
[((309, 372), 'dcim.models.Device.objects.filter', 'Device.objects.filter', ([], {'status': 'DeviceStatusChoices.STATUS_ACTIVE'}), '(status=DeviceStatusChoices.STATUS_ACTIVE)\n', (330, 372), False, 'from dcim.models import Device\n')]
|
import socket
import random
import os
import requests
import re
import github
import minecraft
import string
import sys
HOST = "xeroxirc.net"
PORT = 6667
NICK = "ak_sus"
#PASSWORD = os.getenv("PASSWORD")
CHANNEL = "#BlockySurvival"
SERVER = ""
readbuffer = ""
def send(message):
s.send(message)
print(message)
s = socket.socket()
s.connect((HOST, PORT))
send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
send(bytes("USER %s %s %s :%s\r\n" % (NICK, NICK, NICK, NICK), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ regain {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ identify {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
send(bytes("JOIN {}\r\n".format(CHANNEL), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ :identify {}\r\n".format(PASSWORD), "UTF-8"))
readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
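    # the server name is the first token of the line, minus the leading ':' prefix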
SERVER = str.rstrip(line)[1:].split()[0]
print(str.rstrip(line))
while 1:
readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
print(str.rstrip(line))
message = str.rstrip(line).split(" PRIVMSG {} :".format(CHANNEL))
if "PING" in line: send("PONG :{}\r\n".format(SERVER).encode("utf-8"))
msg = message[-1]
tokens = msg.split()
if msg == "$hello": send("PRIVMSG {} :Hello!\r\n".format(CHANNEL).encode("utf-8"))
if msg == "$ping": send("PRIVMSG {} :Pong!\r\n".format(CHANNEL).encode("utf-8"))
if msg == "$random": send("PRIVMSG {} :{}\r\n".format(CHANNEL, random.randint(0, 100)).encode("utf-8"))
if msg.startswith("$youtube "):
html = requests.get("https://www.youtube.com/results?search_query=" + " ".join(msg.split()[1:])).content
video_ids = re.findall(r"watch\?v=(\S{11})", html.decode())
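            # take the first video id found in the results page as the top match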
send("PRIVMSG {} :https://www.youtube.com/watch?v={}\r\n".format(CHANNEL, video_ids[0]).encode("utf-8"))
#if msg.startswith("$google "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, googlesearch.search(" ".join(msg.split()[1:]))[0]).encode("utf-8"))
#if msg.startswith("$wolfram "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, wolfram.get(" ".join(msg.split()[1:]))).encode("utf-8"))
if msg.startswith("$github "):
if tokens[1] == "url": send("PRIVMSG {} :https://github.com/{}/{}\r\n".format(CHANNEL, tokens[2], tokens[3]).encode("utf-8"))
if tokens[1] == "issues": send("PRIVMSG {} :#{}: {}\r\n".format(CHANNEL, tokens[4], github.get_issue_title(tokens[2], tokens[3], tokens[4])).encode("utf-8"))
if msg == "$server": send("PRIVMSG {} :{}\r\n".format(CHANNEL, minecraft.get()).encode("utf-8"))
if msg == "$help": send("PRIVMSG {} :Avalible commands: $hello, $ping, $youtube, $google, $github, $wolfram.\r\n".format(CHANNEL).encode("utf-8"))
if msg.startswith("$help "):
if tokens[1] == "hello": send("PRIVMSG {} :Syntax: $hello Action: Says \"Hello!\".\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "ping":send("PRIVMSG {} :Syntax: $ping Action: Says \"Ping!\".\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "youtube": send("PRIVMSG {} :Syntax: $youtube <keyword> Action: Sends the URL of a YouTube video matching the keyword given.\r\n".format(CHANNEL).encode("utf-8"))
#if tokens[1] == "google": send("PRIVMSG {} :Syntax: $google <keyword> Action: Sends the URL of a google search with the keyword given\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "github": send("PRIVMSG {} :Syntax: $github <topic> <user> <repo> <number> Action: Returns data about a github repo.\r\n".format(CHANNEL).encode("utf-8"))
#if tokens[1] == "wolfram": send("PRIVMSG {} :Syntax: $wolfram <query> Action: Asks Wolfram|Alpha the query given.\r\n".format(CHANNEL).encode("utf-8"))
|
[
"github.get_issue_title",
"minecraft.get",
"random.randint",
"socket.socket"
] |
[((322, 337), 'socket.socket', 'socket.socket', ([], {}), '()\n', (335, 337), False, 'import socket\n'), ((1631, 1653), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1645, 1653), False, 'import random\n'), ((2726, 2741), 'minecraft.get', 'minecraft.get', ([], {}), '()\n', (2739, 2741), False, 'import minecraft\n'), ((2581, 2636), 'github.get_issue_title', 'github.get_issue_title', (['tokens[2]', 'tokens[3]', 'tokens[4]'], {}), '(tokens[2], tokens[3], tokens[4])\n', (2603, 2636), False, 'import github\n')]
|
"""
The file defines the evaluate process on target dataset.
@Author: <NAME>
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from sklearn.metrics import multilabel_confusion_matrix
from amazingutils.helpers import *
from amazingutils.utils import load_image
import numpy as np
import argparse
import sys
import cv2
import os
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', help='The path of the dataset.', type=str, default='CamVid')
parser.add_argument('--crop_height', help='The height to crop the image.', type=int, default=256)
parser.add_argument('--crop_width', help='The width to crop the image.', type=int, default=256)
parser.add_argument('--predictions', help='The path of predicted image.', type=str, required=True)
args = parser.parse_args()
# check related paths
paths = check_related_path(os.getcwd())
# get image and label file names for training and validation
_, _, _, _, _, test_label_names = get_dataset_info(args.dataset)
# get color info
csv_file = os.path.join(args.dataset, 'class_dict.csv')
class_names, _ = get_colored_info(csv_file)
# get the prediction file name list
if not os.path.exists(args.predictions):
    raise ValueError('the path of predictions does not exist.')
prediction_names = []
for file in sorted(os.listdir(args.predictions)):
prediction_names.append(os.path.join(args.predictions, file))
# evaluated classes
evaluated_classes = get_evaluated_classes(os.path.join(args.dataset, 'evaluated_classes.txt'))
num_classes = len(class_names)
class_iou = dict()
for name in evaluated_classes:
class_iou[name] = list()
class_idx = dict(zip(class_names, range(num_classes)))
# begin evaluate
assert len(test_label_names) == len(prediction_names)
for i, (name1, name2) in enumerate(zip(test_label_names, prediction_names)):
sys.stdout.write('\rRunning test image %d / %d' % (i + 1, len(test_label_names)))
sys.stdout.flush()
label = np.array(cv2.resize(load_image(name1),
dsize=(args.crop_width, args.crop_height), interpolation=cv2.INTER_NEAREST))
pred = np.array(cv2.resize(load_image(name2),
dsize=(args.crop_width, args.crop_height), interpolation=cv2.INTER_NEAREST))
confusion_matrix = multilabel_confusion_matrix(label.flatten(), pred.flatten(), labels=list(class_idx.values()))
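    # multilabel_confusion_matrix yields one 2x2 matrix [[tn, fp], [fn, tp]] per class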
for eval_cls in evaluated_classes:
eval_idx = class_idx[eval_cls]
(tn, fp), (fn, tp) = confusion_matrix[eval_idx]
if tp + fn > 0:
class_iou[eval_cls].append(tp / (tp + fp + fn))
print('\n****************************************')
print('* The IoU of each class is as follows: *')
print('****************************************')
for eval_cls in evaluated_classes:
class_iou[eval_cls] = np.mean(class_iou[eval_cls])
print('{cls:}: {iou:.4f}'.format(cls=eval_cls, iou=class_iou[eval_cls]))
print('\n**********************************************')
print('* The Mean IoU of all classes is as follows: *')
print('**********************************************')
print('Mean IoU: {mean_iou:.4f}'.format(mean_iou=np.mean(list(class_iou.values()))))
|
[
"os.path.exists",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"argparse.ArgumentTypeError",
"os.getcwd",
"sys.stdout.flush",
"amazingutils.utils.load_image"
] |
[((651, 676), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (674, 676), False, 'import argparse\n'), ((1311, 1355), 'os.path.join', 'os.path.join', (['args.dataset', '"""class_dict.csv"""'], {}), "(args.dataset, 'class_dict.csv')\n", (1323, 1355), False, 'import os\n'), ((1142, 1153), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1151, 1153), False, 'import os\n'), ((1445, 1477), 'os.path.exists', 'os.path.exists', (['args.predictions'], {}), '(args.predictions)\n', (1459, 1477), False, 'import os\n'), ((1584, 1612), 'os.listdir', 'os.listdir', (['args.predictions'], {}), '(args.predictions)\n', (1594, 1612), False, 'import os\n'), ((1744, 1795), 'os.path.join', 'os.path.join', (['args.dataset', '"""evaluated_classes.txt"""'], {}), "(args.dataset, 'evaluated_classes.txt')\n", (1756, 1795), False, 'import os\n'), ((2204, 2222), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2220, 2222), False, 'import sys\n'), ((3093, 3121), 'numpy.mean', 'np.mean', (['class_iou[eval_cls]'], {}), '(class_iou[eval_cls])\n', (3100, 3121), True, 'import numpy as np\n'), ((1643, 1679), 'os.path.join', 'os.path.join', (['args.predictions', 'file'], {}), '(args.predictions, file)\n', (1655, 1679), False, 'import os\n'), ((586, 639), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (612, 639), False, 'import argparse\n'), ((2256, 2273), 'amazingutils.utils.load_image', 'load_image', (['name1'], {}), '(name1)\n', (2266, 2273), False, 'from amazingutils.utils import load_image\n'), ((2415, 2432), 'amazingutils.utils.load_image', 'load_image', (['name2'], {}), '(name2)\n', (2425, 2432), False, 'from amazingutils.utils import load_image\n')]
|
from pso.GPSO import GPSO
import numpy as np
import time
import pandas as pd
np.random.seed(42)
# f1 done
def Sphere(p):
    # Sphere function
out_put = 0
for i in p:
out_put += i ** 2
return out_put
# f2 done
def Sch222(x):
out_put = 0
out_put01 = 1
for i in x:
out_put += abs(i)
out_put01 = abs(i)*out_put01
out_put = out_put01+out_put
return out_put
# f3 done
def Quadric(x):
output = 0
# print(x.shape[0])
for i in range(x.shape[0]):
output += (np.sum(x[0:i+1])) ** 2
# print(np.square(np.sum(x[0:i+1])))
return output
# f4 done
def Schl(x):
# print(np.max(np.abs(x)))
return np.max(np.abs(x))
# f5 done
def Step(x):
output = 0
for i in x:
output += (np.floor(i+0.5))**2
return output
# f6 done
def Noise(x):
output = 0
cnt = 1
for i in x:
output = cnt * (i**4) + output
cnt += 1
output += np.random.rand()
return output
# f7 done
def Rosenbrock(p):
    '''
    -2.048 <= xi <= 2.048
    The global optimum lies in a smooth, narrow parabolic valley, which makes it hard
    for the algorithm to judge the search direction, so finding the optimum is difficult.
    The minimum value 0 is reached at (1, ..., 1).
    :param p:
    :return:
    '''
n_dim = len(p)
res = 0
for i in range(n_dim - 1):
res += 100 * np.square(np.square(p[i]) - p[i + 1]) + np.square(p[i] - 1)
return res
# f8 is problematic and is ignored; this is APSO's f8
def Schewel(x):
out_put = 0
for i in x:
out_put += -i*np.sin(np.sqrt(abs(i)))
return out_put
# f9 done
def Rastrigin(p):
    '''
    A multimodal function, a typical nonlinear multimodal benchmark.
    -5.12 <= xi <= 5.12
    There are roughly 10n local minima in this range, with peaks of widely varying height,
    so the global optimum is hard to find.
    has a global minimum at x = 0 where f(x) = 0
    '''
return np.sum([np.square(x) - 10 * np.cos(2 * np.pi * x) + 10 for x in p])
# f10
def Ackley(x):
part1 = 0
part2 = 0
for i in x:
part1 += (i**2)
part2 += np.cos(2 * np.pi * i)
left = 20 * np.exp(-0.2 * ((part1 / x.shape[0]) ** .5))
right = np.exp(part2 / x.shape[0])
return -left - right + 20 + np.e
# f11 ok
def Griewank(p):
    '''
    There are many local minima; their number depends on the problem dimension.
    A typical nonlinear multimodal function with a large search space, and a complex
    multimodal problem that is hard for optimization algorithms to handle.
    The global minimum 0 is reached at (0, ..., 0).
    -600 <= xi <= 600
    '''
part1 = [np.square(x) / 4000 for x in p]
part2 = [np.cos(x / np.sqrt(i + 1)) for i, x in enumerate(p)]
return np.sum(part1) - np.prod(part2) + 1
g = 10000
times = 30
table = np.zeros((2, 10))
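# row 0 accumulates the best fitness per function, row 1 the runtime; both are averaged over `times` runs below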
gBest = np.zeros((10, 30))  # the best value from each of the 30 runs for the 10 functions
for i in range(times):
optimizer = GPSO(func=Sphere, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Sphere:', optimizer.gbest_y)
table[0, 0] += optimizer.gbest_y
table[1, 0] += end - start
gBest[0, i] = optimizer.gbest_y
optimizer = GPSO(func=Sch222, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Sch222:', optimizer.gbest_y)
table[0, 1] += optimizer.gbest_y
table[1, 1] += end - start
gBest[1, i] = optimizer.gbest_y
optimizer = GPSO(func=Quadric, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
print('Quadric:', optimizer.gbest_y)
table[0, 2] += optimizer.gbest_y
table[1, 2] += end - start
gBest[2, i] = optimizer.gbest_y
optimizer = GPSO(func=Rosenbrock, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
print('Rosenbrock:', optimizer.gbest_y)
table[0, 3] += optimizer.gbest_y
table[1, 3] += end - start
gBest[3, i] = optimizer.gbest_y
optimizer = GPSO(func=Step, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0)
start = time.time()
optimizer.run()
end = time.time()
print('Step:', optimizer.gbest_y)
table[0, 4] += optimizer.gbest_y
table[1, 4] += end - start
gBest[4, i] = optimizer.gbest_y
optimizer = GPSO(func=Noise, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-1.28), ub=np.ones(30) * 1.28,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Noise:', optimizer.gbest_y)
table[0, 5] += optimizer.gbest_y
table[1, 5] += end - start
gBest[5, i] = optimizer.gbest_y
optimizer = GPSO(func=Schewel, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-500), ub=np.ones(30) * 500,
w=0.9, c1=2, c2=2, acceptance=-10000)
start = time.time()
optimizer.run()
end = time.time()
print('Schewel:', optimizer.gbest_y)
table[0, 6] += optimizer.gbest_y
table[1, 6] += end - start
gBest[6, i] = optimizer.gbest_y
optimizer = GPSO(func=Rastrigin, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-5.12), ub=np.ones(30) * 5.12,
w=0.9, c1=2, c2=2, acceptance=50)
start = time.time()
optimizer.run()
end = time.time()
print('Rastrigin:', optimizer.gbest_y)
table[0, 7] += optimizer.gbest_y
table[1, 7] += end - start
gBest[7, i] = optimizer.gbest_y
optimizer = GPSO(func=Ackley, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-32), ub=np.ones(30) * 32,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Ackley:', optimizer.gbest_y)
table[0, 8] += optimizer.gbest_y
table[1, 8] += end - start
gBest[8, i] = optimizer.gbest_y
optimizer = GPSO(func=Griewank, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-600), ub=np.ones(30) * 600,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Griewank:', optimizer.gbest_y)
table[0, 9] += optimizer.gbest_y
table[1, 9] += end - start
gBest[9, i] = optimizer.gbest_y
table = table / times
table = pd.DataFrame(table)
table.columns = ['Sphere', 'Schwefel_P222', 'Quadric', 'Rosenbrock', 'Step', 'Quadric_Noise', 'Schwefel',
'Rastrigin', 'Ackley', 'Griewank']
table.index = ['mean score', 'mean time']
print(table)
print('std over 30 runs for the 10 test functions:', np.std(gBest, axis=1))
print('best over 30 runs for the 10 test functions:', np.min(gBest, axis=1))
|
[
"numpy.abs",
"numpy.prod",
"numpy.sqrt",
"numpy.random.rand",
"numpy.ones",
"numpy.floor",
"numpy.min",
"numpy.square",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.random.seed",
"numpy.cos",
"numpy.std",
"pandas.DataFrame",
"time.time"
] |
[((78, 96), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (92, 96), True, 'import numpy as np\n'), ((2299, 2316), 'numpy.zeros', 'np.zeros', (['(2, 10)'], {}), '((2, 10))\n', (2307, 2316), True, 'import numpy as np\n'), ((2325, 2343), 'numpy.zeros', 'np.zeros', (['(10, 30)'], {}), '((10, 30))\n', (2333, 2343), True, 'import numpy as np\n'), ((6195, 6214), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (6207, 6214), True, 'import pandas as pd\n'), ((930, 946), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (944, 946), True, 'import numpy as np\n'), ((1883, 1909), 'numpy.exp', 'np.exp', (['(part2 / x.shape[0])'], {}), '(part2 / x.shape[0])\n', (1889, 1909), True, 'import numpy as np\n'), ((2564, 2575), 'time.time', 'time.time', ([], {}), '()\n', (2573, 2575), False, 'import time\n'), ((2606, 2617), 'time.time', 'time.time', ([], {}), '()\n', (2615, 2617), False, 'import time\n'), ((2939, 2950), 'time.time', 'time.time', ([], {}), '()\n', (2948, 2950), False, 'import time\n'), ((2981, 2992), 'time.time', 'time.time', ([], {}), '()\n', (2990, 2992), False, 'import time\n'), ((3316, 3327), 'time.time', 'time.time', ([], {}), '()\n', (3325, 3327), False, 'import time\n'), ((3358, 3369), 'time.time', 'time.time', ([], {}), '()\n', (3367, 3369), False, 'import time\n'), ((3695, 3706), 'time.time', 'time.time', ([], {}), '()\n', (3704, 3706), False, 'import time\n'), ((3737, 3748), 'time.time', 'time.time', ([], {}), '()\n', (3746, 3748), False, 'import time\n'), ((4071, 4082), 'time.time', 'time.time', ([], {}), '()\n', (4080, 4082), False, 'import time\n'), ((4113, 4124), 'time.time', 'time.time', ([], {}), '()\n', (4122, 4124), False, 'import time\n'), ((4447, 4458), 'time.time', 'time.time', ([], {}), '()\n', (4456, 4458), False, 'import time\n'), ((4489, 4500), 'time.time', 'time.time', ([], {}), '()\n', (4498, 4500), False, 'import time\n'), ((4826, 4837), 'time.time', 'time.time', ([], {}), '()\n', (4835, 4837), False, 'import time\n'), ((4868, 4879), 'time.time', 'time.time', ([], {}), '()\n', (4877, 4879), False, 'import time\n'), ((5207, 5218), 'time.time', 'time.time', ([], {}), '()\n', (5216, 5218), False, 'import time\n'), ((5249, 5260), 'time.time', 'time.time', ([], {}), '()\n', (5258, 5260), False, 'import time\n'), ((5585, 5596), 'time.time', 'time.time', ([], {}), '()\n', (5594, 5596), False, 'import time\n'), ((5627, 5638), 'time.time', 'time.time', ([], {}), '()\n', (5636, 5638), False, 'import time\n'), ((5964, 5975), 'time.time', 'time.time', ([], {}), '()\n', (5973, 5975), False, 'import time\n'), ((6006, 6017), 'time.time', 'time.time', ([], {}), '()\n', (6015, 6017), False, 'import time\n'), ((6467, 6488), 'numpy.std', 'np.std', (['gBest'], {'axis': '(1)'}), '(gBest, axis=1)\n', (6473, 6488), True, 'import numpy as np\n'), ((6516, 6537), 'numpy.min', 'np.min', (['gBest'], {'axis': '(1)'}), '(gBest, axis=1)\n', (6522, 6537), True, 'import numpy as np\n'), ((673, 682), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (679, 682), True, 'import numpy as np\n'), ((1789, 1810), 'numpy.cos', 'np.cos', (['(2 * np.pi * i)'], {}), '(2 * np.pi * i)\n', (1795, 1810), True, 'import numpy as np\n'), ((1827, 1869), 'numpy.exp', 'np.exp', (['(-0.2 * (part1 / x.shape[0]) ** 0.5)'], {}), '(-0.2 * (part1 / x.shape[0]) ** 0.5)\n', (1833, 1869), True, 'import numpy as np\n'), ((516, 534), 'numpy.sum', 'np.sum', (['x[0:i + 1]'], {}), '(x[0:i + 1])\n', (522, 534), True, 'import numpy as np\n'), ((756, 773), 'numpy.floor', 'np.floor', (['(i + 0.5)'], 
{}), '(i + 0.5)\n', (764, 773), True, 'import numpy as np\n'), ((1255, 1274), 'numpy.square', 'np.square', (['(p[i] - 1)'], {}), '(p[i] - 1)\n', (1264, 1274), True, 'import numpy as np\n'), ((2124, 2136), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (2133, 2136), True, 'import numpy as np\n'), ((2233, 2246), 'numpy.sum', 'np.sum', (['part1'], {}), '(part1)\n', (2239, 2246), True, 'import numpy as np\n'), ((2249, 2263), 'numpy.prod', 'np.prod', (['part2'], {}), '(part2)\n', (2256, 2263), True, 'import numpy as np\n'), ((2180, 2194), 'numpy.sqrt', 'np.sqrt', (['(i + 1)'], {}), '(i + 1)\n', (2187, 2194), True, 'import numpy as np\n'), ((2451, 2462), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2458, 2462), True, 'import numpy as np\n'), ((2476, 2487), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2483, 2487), True, 'import numpy as np\n'), ((2828, 2839), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2835, 2839), True, 'import numpy as np\n'), ((2852, 2863), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2859, 2863), True, 'import numpy as np\n'), ((3204, 3215), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3211, 3215), True, 'import numpy as np\n'), ((3229, 3240), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3236, 3240), True, 'import numpy as np\n'), ((3585, 3596), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3592, 3596), True, 'import numpy as np\n'), ((3609, 3620), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3616, 3620), True, 'import numpy as np\n'), ((3961, 3972), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3968, 3972), True, 'import numpy as np\n'), ((3986, 3997), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3993, 3997), True, 'import numpy as np\n'), ((4332, 4343), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4339, 4343), True, 'import numpy as np\n'), ((4358, 4369), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4365, 4369), True, 'import numpy as np\n'), ((4711, 4722), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4718, 4722), True, 'import numpy as np\n'), ((4736, 4747), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4743, 4747), True, 'import numpy as np\n'), ((5094, 5105), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5101, 5105), True, 'import numpy as np\n'), ((5120, 5131), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5127, 5131), True, 'import numpy as np\n'), ((5474, 5485), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5481, 5485), True, 'import numpy as np\n'), ((5498, 5509), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5505, 5509), True, 'import numpy as np\n'), ((5851, 5862), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5858, 5862), True, 'import numpy as np\n'), ((5876, 5887), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5883, 5887), True, 'import numpy as np\n'), ((1622, 1634), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1631, 1634), True, 'import numpy as np\n'), ((1225, 1240), 'numpy.square', 'np.square', (['p[i]'], {}), '(p[i])\n', (1234, 1240), True, 'import numpy as np\n'), ((1642, 1663), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1648, 1663), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
# @Time : 2019/7/21 12:35 PM
# @Author : __wutonghe__
# docs https://channels.readthedocs.io/en/latest/tutorial/part_3.html#rewrite-the-consumer-to-be-asynchronous
from channels.generic.websocket import AsyncWebsocketConsumer
import json
class MessageConsumer(AsyncWebsocketConsumer):
    """
    Private-message websocket; asynchronous communication is used to increase concurrency.
    """

    async def connect(self):
        """Triggered once the websocket connection is established."""
        if self.scope['user'].is_anonymous:
            await self.close()
        else:
            await self.channel_layer.group_add(self.scope['user'].username + '-message', self.channel_name)  # create/join the per-user chat room
            await self.accept()

    async def receive(self, text_data=None, bytes_data=None):
        """Hand the reply back over the websocket."""
        await self.send(text_data=json.dumps(text_data))  # send the message to the front end

    async def disconnect(self, code):
        """Triggered when the connection is closed."""
        await self.channel_layer.group_discard(self.scope['user'].username + '-message', self.channel_name)  # remove this connection from the chat room
|
[
"json.dumps"
] |
[((793, 814), 'json.dumps', 'json.dumps', (['text_data'], {}), '(text_data)\n', (803, 814), False, 'import json\n')]
|
import io
from .. import util
def test_parsing_pkg_info_file(mocker):
    open_mock = mocker.patch('vcsver.util.open')
    open_mock.return_value = io.StringIO(
        'Name: name\n'
        'Version: 1.0\n'
    )

    pkg_info_data = util.parse_pkg_info_file(mocker.sentinel.path)

    open_mock.assert_called_once_with(
        mocker.sentinel.path,
        'rt',
    )

    assert {
        'Name': 'name',
        'Version': '1.0',
    } == pkg_info_data
|
[
"io.StringIO"
] |
[((151, 194), 'io.StringIO', 'io.StringIO', (['"""Name: name\nVersion: 1.0\n"""'], {}), '("""Name: name\nVersion: 1.0\n""")\n', (162, 194), False, 'import io\n')]
|
#!/usr/bin/env python
# This "flattens" a LaTeX document by replacing all
# \input{X} lines w/ the text actually contained in X. See
# associated README.md for details.
# Use as a python module in a python script by saying import flatex then flatex.main(in file, out file)
import os
import re
import sys
def is_input(line):
    """
    Determines whether or not a read in line contains an
    uncommented out \input{} statement. Allows only spaces between
    start of line and '\input{}'.
    """
    #tex_input_re = r"""^\s*\\input{[^}]*}""" # input only
    tex_input_re = r"""(^[^\%]*\\input{[^}]*})|(^[^\%]*\\include{[^}]*})""" # input or include
    return re.search(tex_input_re, line)
def get_input(line):
    """
    Gets the file name from a line containing an input statement.
    """
    tex_input_filename_re = r"""{[^}]*"""
    m = re.search(tex_input_filename_re, line)
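    # m.group() is e.g. '{chapters/intro'; stripping the leading '{' leaves the file name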
    return m.group()[1:]
def combine_path(base_path, relative_ref):
    """
    Combines the base path of the tex document being worked on
    with the relative reference found in that document.
    """
    #if (base_path != ""):
    #print "os.getcwd()", os.getcwd()
    #os.chdir(base_path)
    filePath = os.path.abspath(relative_ref)
    filePath = filePath + ".tex"
    return filePath
def expand_file(base_file):
    """
    Recursively-defined function that takes as input a file and
    returns it with all the inputs replaced with the contents of the
    referenced file.
    """
    output_lines = []
    f = open(base_file, "r")
    for line in f:
        if is_input(line):
            new_base_file = combine_path(current_path, get_input(line))
            output_lines += expand_file(new_base_file)
            output_lines.append('\n')  # add a new line after each file input
        else:
            output_lines.append(line)
    f.close()
    return output_lines
def main(base_file, output_file):
    g = open(output_file, "w")
    g.write(''.join(expand_file(base_file)))
    g.close()
    return None

if __name__ == '__main__':
    base_file, output_file = sys.argv[1:]
    current_path = os.path.split(base_file)[0]
    main(base_file, output_file)
|
[
"os.path.abspath",
"os.path.split",
"re.search"
] |
[((673, 702), 're.search', 're.search', (['tex_input_re', 'line'], {}), '(tex_input_re, line)\n', (682, 702), False, 'import re\n'), ((859, 897), 're.search', 're.search', (['tex_input_filename_re', 'line'], {}), '(tex_input_filename_re, line)\n', (868, 897), False, 'import re\n'), ((1217, 1246), 'os.path.abspath', 'os.path.abspath', (['relative_ref'], {}), '(relative_ref)\n', (1232, 1246), False, 'import os\n'), ((2123, 2147), 'os.path.split', 'os.path.split', (['base_file'], {}), '(base_file)\n', (2136, 2147), False, 'import os\n')]
|